Merge branch 'develop' into fix/python_wrapper
commit b5c998ab76
@@ -196,6 +196,42 @@ namespace gtsam {
     return this->apply(g, &Ring::div);
   }

+  /// Compute sum of all values
+  double sum() const {
+    double sum = 0;
+    auto visitor = [&](double y) { sum += y; };
+    this->visit(visitor);
+    return sum;
+  }
+
+  /**
+   * @brief Helper method to perform normalization such that all leaves in the
+   * tree sum to 1
+   *
+   * @param sum
+   * @return AlgebraicDecisionTree
+   */
+  AlgebraicDecisionTree normalize(double sum) const {
+    return this->apply([&sum](const double& x) { return x / sum; });
+  }
+
+  /// Find the minimum values amongst all leaves
+  double min() const {
+    double min = std::numeric_limits<double>::max();
+    auto visitor = [&](double x) { min = x < min ? x : min; };
+    this->visit(visitor);
+    return min;
+  }
+
+  /// Find the maximum values amongst all leaves
+  double max() const {
+    // Get the most negative value
+    double max = -std::numeric_limits<double>::max();
+    auto visitor = [&](double x) { max = x > max ? x : max; };
+    this->visit(visitor);
+    return max;
+  }
+
   /** sum out variable */
   AlgebraicDecisionTree sum(const L& label, size_t cardinality) const {
     return this->combine(label, cardinality, &Ring::add);
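For orientation, a minimal usage sketch of the AlgebraicDecisionTree helpers added above (not taken from the diff; the key, cardinality, and leaf values are illustrative assumptions):

// Illustrative only: exercises the sum/normalize/min/max helpers added above.
// The key and leaf values are made up for this sketch.
#include <gtsam/discrete/AlgebraicDecisionTree.h>
#include <gtsam/inference/Key.h>
#include <iostream>

int main() {
  using gtsam::AlgebraicDecisionTree;
  using gtsam::Key;

  // A tree over one binary key with leaves {1, 3}.
  AlgebraicDecisionTree<Key> tree(Key(0), 1.0, 3.0);

  const double total = tree.sum();                // 1 + 3 = 4
  const auto normalized = tree.normalize(total);  // leaves {0.25, 0.75}

  std::cout << "sum = " << total << ", min = " << tree.min()
            << ", max = " << tree.max() << std::endl;
  std::cout << "normalized sum = " << normalized.sum() << std::endl;  // ~1
  return 0;
}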
@@ -593,6 +593,55 @@ TEST(ADT, zero) {
   EXPECT_DOUBLES_EQUAL(0, anotb(x11), 1e-9);
 }

+/// Example ADT from 0 to 11.
+ADT exampleADT() {
+  DiscreteKey A(0, 2), B(1, 3), C(2, 2);
+  ADT f(A & B & C, "0 6 2 8 4 10 1 7 3 9 5 11");
+  return f;
+}
+/* ************************************************************************** */
+// Test sum
+TEST(ADT, Sum) {
+  ADT a = exampleADT();
+  double expected_sum = 0;
+  for (double i = 0; i < 12; i++) {
+    expected_sum += i;
+  }
+  EXPECT_DOUBLES_EQUAL(expected_sum, a.sum(), 1e-9);
+}
+
+/* ************************************************************************** */
+// Test normalize
+TEST(ADT, Normalize) {
+  ADT a = exampleADT();
+  double sum = a.sum();
+  auto actual = a.normalize(sum);
+
+  DiscreteKey A(0, 2), B(1, 3), C(2, 2);
+  DiscreteKeys keys = DiscreteKeys{A, B, C};
+  std::vector<double> cpt{0 / sum, 6 / sum,  2 / sum, 8 / sum,
+                          4 / sum, 10 / sum, 1 / sum, 7 / sum,
+                          3 / sum, 9 / sum,  5 / sum, 11 / sum};
+  ADT expected(keys, cpt);
+  EXPECT(assert_equal(expected, actual));
+}
+
+/* ************************************************************************** */
+// Test min
+TEST(ADT, Min) {
+  ADT a = exampleADT();
+  double min = a.min();
+  EXPECT_DOUBLES_EQUAL(0.0, min, 1e-9);
+}
+
+/* ************************************************************************** */
+// Test max
+TEST(ADT, Max) {
+  ADT a = exampleADT();
+  double max = a.max();
+  EXPECT_DOUBLES_EQUAL(11.0, max, 1e-9);
+}
+
 /* ************************************************************************* */
 int main() {
   TestResult tr;
@@ -68,7 +68,7 @@ namespace gtsam {
     return fromAngle(theta * degree);
   }

-  /// Named constructor from cos(theta),sin(theta) pair, will *not* normalize!
+  /// Named constructor from cos(theta),sin(theta) pair
   static Rot2 fromCosSin(double c, double s);

   /**
@@ -856,7 +856,7 @@ class Cal3_S2Stereo {
   gtsam::Matrix K() const;
   gtsam::Point2 principalPoint() const;
   double baseline() const;
-  Vector6 vector() const;
+  gtsam::Vector6 vector() const;
   gtsam::Matrix inverse() const;
 };

@@ -67,7 +67,8 @@ class GTSAM_EXPORT GaussianMixture
   double logConstant_;  ///< log of the normalization constant.

   /**
-   * @brief Convert a DecisionTree of factors into a DT of Gaussian FGs.
+   * @brief Convert a DecisionTree of factors into
+   * a DecisionTree of Gaussian factor graphs.
    */
   GaussianFactorGraphTree asGaussianFactorGraphTree() const;

@@ -214,7 +215,8 @@ class GTSAM_EXPORT GaussianMixture
    * @return AlgebraicDecisionTree<Key> A decision tree on the discrete keys
    * only, with the leaf values as the error for each assignment.
    */
-  AlgebraicDecisionTree<Key> errorTree(const VectorValues &continuousValues) const;
+  AlgebraicDecisionTree<Key> errorTree(
+      const VectorValues &continuousValues) const;

   /**
    * @brief Compute the logProbability of this Gaussian Mixture.
@@ -135,7 +135,8 @@ class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor {
    * @return AlgebraicDecisionTree<Key> A decision tree with the same keys
    * as the factors involved, and leaf values as the error.
    */
-  AlgebraicDecisionTree<Key> errorTree(const VectorValues &continuousValues) const;
+  AlgebraicDecisionTree<Key> errorTree(
+      const VectorValues &continuousValues) const;

   /**
    * @brief Compute the log-likelihood, including the log-normalizing constant.
@@ -12,6 +12,7 @@
 /**
  * @file HybridValues.h
  * @date Jul 28, 2022
+ * @author Varun Agrawal
  * @author Shangjie Xue
  */

@@ -54,13 +55,13 @@ class GTSAM_EXPORT HybridValues {
   HybridValues() = default;

   /// Construct from DiscreteValues and VectorValues.
-  HybridValues(const VectorValues &cv, const DiscreteValues &dv)
-      : continuous_(cv), discrete_(dv){}
+  HybridValues(const VectorValues& cv, const DiscreteValues& dv)
+      : continuous_(cv), discrete_(dv) {}

   /// Construct from all values types.
   HybridValues(const VectorValues& cv, const DiscreteValues& dv,
                const Values& v)
-      : continuous_(cv), discrete_(dv), nonlinear_(v){}
+      : continuous_(cv), discrete_(dv), nonlinear_(v) {}

   /// @}
   /// @name Testable
@@ -101,9 +102,7 @@ class GTSAM_EXPORT HybridValues {
   bool existsDiscrete(Key j) { return (discrete_.find(j) != discrete_.end()); }

   /// Check whether a variable with key \c j exists in values.
-  bool existsNonlinear(Key j) {
-    return nonlinear_.exists(j);
-  }
+  bool existsNonlinear(Key j) { return nonlinear_.exists(j); }

   /// Check whether a variable with key \c j exists.
   bool exists(Key j) {
@@ -128,9 +127,7 @@ class GTSAM_EXPORT HybridValues {
   }

   /// insert_or_assign() , similar to Values.h
-  void insert_or_assign(Key j, size_t value) {
-    discrete_[j] = value;
-  }
+  void insert_or_assign(Key j, size_t value) { discrete_[j] = value; }

   /** Insert all continuous values from \c values. Throws an invalid_argument
    * exception if any keys to be inserted are already used. */
@@ -333,7 +333,6 @@ TEST(HybridEstimation, Probability) {
   for (auto discrete_conditional : *discreteBayesNet) {
     bayesNet->add(discrete_conditional);
   }
-  auto discreteConditional = discreteBayesNet->at(0)->asDiscrete();

   HybridValues hybrid_values = bayesNet->optimize();

@@ -680,12 +680,14 @@ conditional 0: Hybrid P( x0 | x1 m0)
  R = [ 10.0499 ]
  S[x1] = [ -0.0995037 ]
  d = [ -9.85087 ]
+ logNormalizationConstant: 1.38862
  No noise model

 1 Leaf p(x0 | x1)
  R = [ 10.0499 ]
  S[x1] = [ -0.0995037 ]
  d = [ -9.95037 ]
+ logNormalizationConstant: 1.38862
  No noise model

 conditional 1: Hybrid P( x1 | x2 m0 m1)
@@ -696,12 +698,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1)
  R = [ 10.099 ]
  S[x2] = [ -0.0990196 ]
  d = [ -9.99901 ]
+ logNormalizationConstant: 1.3935
  No noise model

 0 1 Leaf p(x1 | x2)
  R = [ 10.099 ]
  S[x2] = [ -0.0990196 ]
  d = [ -9.90098 ]
+ logNormalizationConstant: 1.3935
  No noise model

 1 Choice(m0)
@@ -709,12 +713,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1)
  R = [ 10.099 ]
  S[x2] = [ -0.0990196 ]
  d = [ -10.098 ]
+ logNormalizationConstant: 1.3935
  No noise model

 1 1 Leaf p(x1 | x2)
  R = [ 10.099 ]
  S[x2] = [ -0.0990196 ]
  d = [ -10 ]
+ logNormalizationConstant: 1.3935
  No noise model

 conditional 2: Hybrid P( x2 | m0 m1)
@@ -726,6 +732,7 @@ conditional 2: Hybrid P( x2 | m0 m1)
  d = [ -10.1489 ]
  mean: 1 elements
  x2: -1.0099
+ logNormalizationConstant: 1.38857
  No noise model

 0 1 Leaf p(x2)
@@ -733,6 +740,7 @@ conditional 2: Hybrid P( x2 | m0 m1)
  d = [ -10.1479 ]
  mean: 1 elements
  x2: -1.0098
+ logNormalizationConstant: 1.38857
  No noise model

 1 Choice(m0)
@@ -741,6 +749,7 @@ conditional 2: Hybrid P( x2 | m0 m1)
  d = [ -10.0504 ]
  mean: 1 elements
  x2: -1.0001
+ logNormalizationConstant: 1.38857
  No noise model

 1 1 Leaf p(x2)
@@ -748,6 +757,7 @@ conditional 2: Hybrid P( x2 | m0 m1)
  d = [ -10.0494 ]
  mean: 1 elements
  x2: -1
+ logNormalizationConstant: 1.38857
  No noise model

 )";
@@ -243,5 +243,25 @@ namespace gtsam {
   }

   /* ************************************************************************* */
+  double GaussianBayesNet::logNormalizationConstant() const {
+    /*
+      normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
+      logConstant = -0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
+
+      log det(Sigma)) = -2.0 * logDeterminant()
+      thus, logConstant = -0.5*n*log(2*pi) + logDeterminant()
+
+      BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i())
+                           = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i())
+                           = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant()
+    */
+    double logNormConst = 0.0;
+    for (const sharedConditional& cg : *this) {
+      logNormConst += cg->logNormalizationConstant();
+    }
+    return logNormConst;
+  }
+
+  /* ************************************************************************* */

 }  // namespace gtsam
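Restating the derivation from the comment above as display math (a sketch using the same conventions; $n_i$ and $R_i$ denote the dimension and upper-triangular factor of the $i$-th conditional):

\[
  Z_i = \frac{1}{\sqrt{(2\pi)^{n_i}\,\det(\Sigma_i)}}
  \quad\Longrightarrow\quad
  \log Z_i = -\tfrac{1}{2} n_i \log(2\pi) - \tfrac{1}{2}\log\det(\Sigma_i)
           = -\tfrac{1}{2} n_i \log(2\pi) + \log\det(R_i),
\]
\[
  \log Z_{\text{BayesNet}} = \sum_i \log Z_i
  = -\tfrac{1}{2}\log(2\pi)\sum_i n_i + \sum_i \log\det(R_i).
\]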
@@ -82,6 +82,12 @@ namespace gtsam {
     /** Check equality */
     bool equals(const This& bn, double tol = 1e-9) const;

+    /// Check exact equality.
+    friend bool operator==(const GaussianBayesNet& lhs,
+                           const GaussianBayesNet& rhs) {
+      return lhs.isEqual(rhs);
+    }
+
     /// print graph
     void print(
         const std::string& s = "",
@@ -228,6 +234,14 @@ namespace gtsam {
      * @return The determinant */
     double logDeterminant() const;

+    /**
+     * @brief Get the log of the normalization constant corresponding to the
+     * joint Gaussian density represented by this Bayes net.
+     *
+     * @return double
+     */
+    double logNormalizationConstant() const;
+
     /**
      * Backsubstitute with a different RHS vector than the one stored in this BayesNet.
      * gy=inv(R*inv(Sigma))*gx
@@ -121,6 +121,7 @@ namespace gtsam {
       const auto mean = solve({});  // solve for mean.
       mean.print(" mean", formatter);
     }
+    cout << " logNormalizationConstant: " << logNormalizationConstant() << std::endl;
     if (model_)
       model_->print(" Noise model: ");
     else
@@ -184,8 +185,13 @@ namespace gtsam {
   double GaussianConditional::logNormalizationConstant() const {
     constexpr double log2pi = 1.8378770664093454835606594728112;
     size_t n = d().size();
-    // log det(Sigma)) = - 2.0 * logDeterminant()
-    return - 0.5 * n * log2pi + logDeterminant();
+    // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1}
+    // log det(Sigma) = -log(det(R'R)) = -2*log(det(R))
+    // Hence, log det(Sigma)) = -2.0 * logDeterminant()
+    // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDeterminant())
+    //                 = -0.5*n*log(2*pi) + (0.5*2.0 * logDeterminant())
+    //                 = -0.5*n*log(2*pi) + logDeterminant()
+    return -0.5 * n * log2pi + logDeterminant();
   }

   /* ************************************************************************* */
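The determinant identity used in the rewritten comment above, written out (a sketch; $\Sigma = (R^\top R)^{-1}$ as in the code):

\[
  \log\det(\Sigma) = -\log\det(R^\top R) = -2\log\det(R),
  \qquad
  \log Z = -\tfrac{1}{2} n \log(2\pi) - \tfrac{1}{2}\log\det(\Sigma)
         = -\tfrac{1}{2} n \log(2\pi) + \log\det(R),
\]
with $\log(2\pi) \approx 1.8378770664$, the constant hard-coded as log2pi above.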
@@ -263,6 +263,11 @@ namespace gtsam {
     /** equals required by Testable for unit testing */
     bool equals(const VectorValues& x, double tol = 1e-9) const;

+    /// Check equality.
+    friend bool operator==(const VectorValues& lhs, const VectorValues& rhs) {
+      return lhs.equals(rhs);
+    }
+
     /// @}
     /// @name Advanced Interface
     /// @{
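A short sketch of what the operator== overload added above enables (not from the diff; the key and vectors are illustrative), with the tolerance-based equals() still available for approximate comparison:

// Illustrative only: operator== on VectorValues forwards to equals() with the
// default tolerance, as defined in the hunk above.
#include <gtsam/linear/VectorValues.h>

int main() {
  using namespace gtsam;
  VectorValues a, b;
  a.insert(0, Vector2(1.0, 2.0));
  b.insert(0, Vector2(1.0, 2.0));

  const bool same = (a == b);            // new comparison operator
  const bool close = a.equals(b, 1e-6);  // existing tolerance-based check
  return (same && close) ? 0 : 1;
}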
@@ -375,7 +375,8 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
   void serialize() const;
 };

-pair<gtsam::GaussianConditional, gtsam::JacobianFactor*> EliminateQR(const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys);
+pair<gtsam::GaussianConditional*, gtsam::JacobianFactor*> EliminateQR(
+    const gtsam::GaussianFactorGraph& factors, const gtsam::Ordering& keys);

 #include <gtsam/linear/HessianFactor.h>
 virtual class HessianFactor : gtsam::GaussianFactor {
@@ -510,12 +511,17 @@ virtual class GaussianConditional : gtsam::JacobianFactor {
   GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S,
                       size_t name2, gtsam::Matrix T,
                       const gtsam::noiseModel::Diagonal* sigmas);
+  GaussianConditional(const vector<std::pair<gtsam::Key, gtsam::Matrix>> terms,
+                      size_t nrFrontals, gtsam::Vector d,
+                      const gtsam::noiseModel::Diagonal* sigmas);

   // Constructors with no noise model
   GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R);
   GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S);
   GaussianConditional(size_t key, gtsam::Vector d, gtsam::Matrix R, size_t name1, gtsam::Matrix S,
                       size_t name2, gtsam::Matrix T);
+  GaussianConditional(const gtsam::KeyVector& keys, size_t nrFrontals,
+                      const gtsam::VerticalBlockMatrix& augmentedMatrix);

   // Named constructors
   static gtsam::GaussianConditional FromMeanAndStddev(gtsam::Key key,
@@ -80,6 +80,8 @@ TEST(GaussianBayesNet, Evaluate1) {
                        smallBayesNet.at(0)->logNormalizationConstant() +
                            smallBayesNet.at(1)->logNormalizationConstant(),
                        1e-9);
+  EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(),
+                       1e-9);
   const double actual = smallBayesNet.evaluate(mean);
   EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9);
 }
@@ -516,6 +516,7 @@ TEST(GaussianConditional, Print) {
     " d = [ 20 40 ]\n"
     " mean: 1 elements\n"
     " x0: 20 40\n"
+    " logNormalizationConstant: -4.0351\n"
     "isotropic dim=2 sigma=3\n";
   EXPECT(assert_print_equal(expected, conditional, "GaussianConditional"));

@@ -530,6 +531,7 @@ TEST(GaussianConditional, Print) {
     " S[x1] = [ -1 -2 ]\n"
     "         [ -3 -4 ]\n"
     " d = [ 20 40 ]\n"
+    " logNormalizationConstant: -4.0351\n"
     "isotropic dim=2 sigma=3\n";
   EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional"));

@@ -545,6 +547,7 @@ TEST(GaussianConditional, Print) {
     " S[y1] = [ -5 -6 ]\n"
     "         [ -7 -8 ]\n"
     " d = [ 20 40 ]\n"
+    " logNormalizationConstant: -4.0351\n"
     "isotropic dim=2 sigma=3\n";
   EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional"));
 }
@@ -424,6 +424,11 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors,
   ISAM2Result result(params_.enableDetailedResults);
   UpdateImpl update(params_, updateParams);

+  // Initialize any new variables \Theta_{new} and add
+  // \Theta:=\Theta\cup\Theta_{new}.
+  // Needed before delta update if using Dogleg optimizer.
+  addVariables(newTheta, result.details());
+
   // Update delta if we need it to check relinearization later
   if (update.relinarizationNeeded(update_count_))
     updateDelta(updateParams.forceFullSolve);
@@ -435,9 +440,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors,
   update.computeUnusedKeys(newFactors, variableIndex_,
                            result.keysWithRemovedFactors, &result.unusedKeys);

-  // 2. Initialize any new variables \Theta_{new} and add
-  // \Theta:=\Theta\cup\Theta_{new}.
-  addVariables(newTheta, result.details());
+  // 2. Compute new error to check for relinearization
   if (params_.evaluateNonlinearError)
     update.error(nonlinearFactors_, calculateEstimate(), &result.errorBefore);

@@ -731,6 +734,7 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
                                    effectiveWildfireThreshold, &delta_);
     deltaReplacedMask_.clear();
     gttoc(Wildfire_update);
+
   } else if (std::holds_alternative<ISAM2DoglegParams>(params_.optimizationParams)) {
     // If using Dogleg, do a Dogleg step
     const ISAM2DoglegParams& doglegParams =
@@ -769,9 +773,8 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
     gttic(Copy_dx_d);
     // Update Delta and linear step
     doglegDelta_ = doglegResult.delta;
-    delta_ =
-        doglegResult
-            .dx_d;  // Copy the VectorValues containing with the linear solution
+    // Copy the VectorValues containing with the linear solution
+    delta_ = doglegResult.dx_d;
     gttoc(Copy_dx_d);
   } else {
     throw std::runtime_error("iSAM2: unknown ISAM2Params type");
@@ -381,17 +381,22 @@ typedef gtsam::GncOptimizer<gtsam::GncParams<gtsam::LevenbergMarquardtParams>> G
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 virtual class LevenbergMarquardtOptimizer : gtsam::NonlinearOptimizer {
   LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph,
-                              const gtsam::Values& initialValues);
+                              const gtsam::Values& initialValues,
+                              const gtsam::LevenbergMarquardtParams& params =
+                                  gtsam::LevenbergMarquardtParams());
   LevenbergMarquardtOptimizer(const gtsam::NonlinearFactorGraph& graph,
                               const gtsam::Values& initialValues,
-                              const gtsam::LevenbergMarquardtParams& params);
+                              const gtsam::Ordering& ordering,
+                              const gtsam::LevenbergMarquardtParams& params =
+                                  gtsam::LevenbergMarquardtParams());
+
   double lambda() const;
   void print(string s = "") const;
 };

 #include <gtsam/nonlinear/ISAM2.h>
 class ISAM2GaussNewtonParams {
-  ISAM2GaussNewtonParams();
+  ISAM2GaussNewtonParams(double _wildfireThreshold = 0.001);

   void print(string s = "") const;

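The updated wrapper signatures above mirror the C++ constructors; a hedged C++ sketch of the two construction patterns they expose (the parameter settings are illustrative, not from the diff):

// Illustrative only: construct a LevenbergMarquardtOptimizer with defaulted or
// explicit LevenbergMarquardtParams, matching the wrapper signatures above.
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

gtsam::Values optimizeWithDefaults(const gtsam::NonlinearFactorGraph& graph,
                                   const gtsam::Values& initial) {
  // Uses the defaulted params argument.
  gtsam::LevenbergMarquardtOptimizer optimizer(graph, initial);
  return optimizer.optimize();
}

gtsam::Values optimizeWithParams(const gtsam::NonlinearFactorGraph& graph,
                                 const gtsam::Values& initial) {
  gtsam::LevenbergMarquardtParams params;
  params.setMaxIterations(50);       // illustrative setting
  params.setRelativeErrorTol(1e-6);  // illustrative setting
  gtsam::LevenbergMarquardtOptimizer optimizer(graph, initial, params);
  return optimizer.optimize();
}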
@@ -42,7 +42,9 @@ namespace gtsam
     // Gather all keys
     KeySet allKeys;
     for(const std::shared_ptr<FACTOR>& factor: factors) {
-      allKeys.insert(factor->begin(), factor->end());
+      // Non-active factors are nullptr
+      if (factor)
+        allKeys.insert(factor->begin(), factor->end());
     }

     // Check keys
@@ -671,6 +671,21 @@ class AHRS {
   //void print(string s) const;
 };

+#include <gtsam_unstable/slam/PartialPriorFactor.h>
+template <T = {gtsam::Pose2, gtsam::Pose3}>
+virtual class PartialPriorFactor : gtsam::NoiseModelFactor {
+  PartialPriorFactor(gtsam::Key key, size_t idx, double prior,
+                     const gtsam::noiseModel::Base* model);
+  PartialPriorFactor(gtsam::Key key, const std::vector<size_t>& indices,
+                     const gtsam::Vector& prior,
+                     const gtsam::noiseModel::Base* model);
+
+  // enabling serialization functionality
+  void serialize() const;
+
+  const gtsam::Vector& prior();
+};
+
 // Tectonic SAM Factors

 #include <gtsam_unstable/slam/TSAMFactors.h>
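For context, a hedged C++ sketch of how the newly wrapped PartialPriorFactor is used (a GPS-like prior on the x,y components of a Pose2, mirroring the Python example added later in this diff; the key and numbers are illustrative):

// Illustrative only: constrain just the x,y entries of a Pose2 with
// PartialPriorFactor, analogous to the new Python example in this diff.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam_unstable/slam/PartialPriorFactor.h>
#include <vector>

void addGpsLikePrior(gtsam::NonlinearFactorGraph& graph) {
  using namespace gtsam;

  // 10 cm standard deviation on the two translation components.
  auto unaryNoise = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1));

  // Indices 0 and 1 select the x,y entries of the Pose2 tangent vector.
  std::vector<size_t> indices{0, 1};
  Vector prior = Vector2(2.0, 0.0);

  graph.emplace_shared<PartialPriorFactor<Pose2>>(1, indices, prior, unaryNoise);
}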
@@ -50,9 +50,6 @@ namespace gtsam {
     Vector prior_;                 ///< Measurement on tangent space parameters, in compressed form.
     std::vector<size_t> indices_;  ///< Indices of the measured tangent space parameters.

-    /** default constructor - only use for serialization */
-    PartialPriorFactor() {}
-
     /**
      * constructor with just minimum requirements for a factor - allows more
      * computation in the constructor. This should only be used by subclasses
@@ -65,7 +62,8 @@ namespace gtsam {
     // Provide access to the Matrix& version of evaluateError:
     using Base::evaluateError;

-    ~PartialPriorFactor() override {}
+    /** default constructor - only use for serialization */
+    PartialPriorFactor() {}

     /** Single Element Constructor: Prior on a single parameter at index 'idx' in the tangent vector.*/
     PartialPriorFactor(Key key, size_t idx, double prior, const SharedNoiseModel& model) :
@@ -85,6 +83,8 @@ namespace gtsam {
       assert(model->dim() == (size_t)prior.size());
     }

+    ~PartialPriorFactor() override {}
+
     /// @return a deep copy of this factor
     gtsam::NonlinearFactor::shared_ptr clone() const override {
       return std::static_pointer_cast<gtsam::NonlinearFactor>(
@@ -29,11 +29,8 @@ if(POLICY CMP0057)
   cmake_policy(SET CMP0057 NEW)
 endif()

-# Prefer system pybind11 first, if not found, rely on bundled version:
-find_package(pybind11 CONFIG QUIET)
-if (NOT pybind11_FOUND)
-  add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11)
-endif()
+# Use bundled pybind11 version
+add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11)

 # Set the wrapping script variable
 set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py")
@@ -189,6 +186,8 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
         gtsam::BinaryMeasurementsPoint3
         gtsam::BinaryMeasurementsUnit3
         gtsam::BinaryMeasurementsRot3
+        gtsam::SimWall2DVector
+        gtsam::SimPolygon2DVector
         gtsam::CameraSetCal3_S2
         gtsam::CameraSetCal3Bundler
         gtsam::CameraSetCal3Unified
@@ -272,6 +271,20 @@ add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
         WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}
         VERBATIM)

+# if (NOT WIN32) # WIN32=1 is target platform is Windows
+#   add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
+#     COMMAND stubgen -q -p gtsam -o ./stubs && cp -a stubs/gtsam/ gtsam && ${PYTHON_EXECUTABLE} -m pip install --user .
+#     DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
+#     WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})
+# else()
+#   #TODO(Varun) Find equivalent cp on Windows
+#   add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
+#     COMMAND ${PYTHON_EXECUTABLE} -m pip install --user .
+#     DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
+#     WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})
+# endif()
+
+
 # Custom make command to run all GTSAM Python tests
 add_custom_target(
   python-test
@@ -1,2 +1,3 @@
 -r requirements.txt
 pyparsing>=2.4.2
+mypy==1.4.1 #TODO(Varun) A bug in mypy>=1.5.0 causes an unresolved placeholder error when importing numpy>=2.0.0 (https://github.com/python/mypy/issues/17396)
@@ -17,7 +17,7 @@
 | InverseKinematicsExampleExpressions.cpp | |
 | ISAM2Example_SmartFactor | |
 | ISAM2_SmartFactorStereo_IMU | |
-| LocalizationExample | impossible? |
+| LocalizationExample | :heavy_check_mark: |
 | METISOrderingExample | |
 | OdometryExample | :heavy_check_mark: |
 | PlanarSLAMExample | :heavy_check_mark: |
@@ -0,0 +1,68 @@
+"""
+A simple 2D pose slam example with "GPS" measurements
+  - The robot moves forward 2 meter each iteration
+  - The robot initially faces along the X axis (horizontal, to the right in 2D)
+  - We have full odometry between pose
+  - We have "GPS-like" measurements implemented with a custom factor
+"""
+import numpy as np
+
+import gtsam
+from gtsam import BetweenFactorPose2, Pose2, noiseModel
+from gtsam_unstable import PartialPriorFactorPose2
+
+
+def main():
+    # 1. Create a factor graph container and add factors to it.
+    graph = gtsam.NonlinearFactorGraph()
+
+    # 2a. Add odometry factors
+    # For simplicity, we will use the same noise model for each odometry factor
+    odometryNoise = noiseModel.Diagonal.Sigmas(np.asarray([0.2, 0.2, 0.1]))
+
+    # Create odometry (Between) factors between consecutive poses
+    graph.push_back(
+        BetweenFactorPose2(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise))
+    graph.push_back(
+        BetweenFactorPose2(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise))
+
+    # 2b. Add "GPS-like" measurements
+    # We will use PartialPrior factor for this.
+    unaryNoise = noiseModel.Diagonal.Sigmas(np.array([0.1,
+                                                      0.1]))  # 10cm std on x,y
+
+    graph.push_back(
+        PartialPriorFactorPose2(1, [0, 1], np.asarray([0.0, 0.0]), unaryNoise))
+    graph.push_back(
+        PartialPriorFactorPose2(2, [0, 1], np.asarray([2.0, 0.0]), unaryNoise))
+    graph.push_back(
+        PartialPriorFactorPose2(3, [0, 1], np.asarray([4.0, 0.0]), unaryNoise))
+    graph.print("\nFactor Graph:\n")
+
+    # 3. Create the data structure to hold the initialEstimate estimate to the solution
+    # For illustrative purposes, these have been deliberately set to incorrect values
+    initialEstimate = gtsam.Values()
+    initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2))
+    initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2))
+    initialEstimate.insert(3, Pose2(4.1, 0.1, 0.1))
+    initialEstimate.print("\nInitial Estimate:\n")
+
+    # 4. Optimize using Levenberg-Marquardt optimization. The optimizer
+    # accepts an optional set of configuration parameters, controlling
+    # things like convergence criteria, the type of linear system solver
+    # to use, and the amount of information displayed during optimization.
+    # Here we will use the default set of parameters. See the
+    # documentation for the full set of parameters.
+    optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
+    result = optimizer.optimize()
+    result.print("Final Result:\n")
+
+    # 5. Calculate and print marginal covariances for all variables
+    marginals = gtsam.Marginals(graph, result)
+    print("x1 covariance:\n", marginals.marginalCovariance(1))
+    print("x2 covariance:\n", marginals.marginalCovariance(2))
+    print("x3 covariance:\n", marginals.marginalCovariance(3))
+
+
+if __name__ == "__main__":
+    main()
@@ -9,6 +9,7 @@

 #include <pybind11/eigen.h>
 #include <pybind11/stl_bind.h>
+#include <pybind11/stl.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/functional.h>
 #include <pybind11/iostream.h>
@@ -13,6 +13,7 @@ package_data = {
         "./*.so",
         "./*.dll",
         "./*.pyd",
+        "*.pyi", "**/*.pyi",  # Add the type hints
     ]
 }

@@ -24,10 +24,9 @@
 #include <gtsam/nonlinear/DoglegOptimizerImpl.h>
 #include <gtsam/nonlinear/NonlinearEquality.h>
 #include <gtsam/slam/BetweenFactor.h>
-#include <gtsam/inference/Symbol.h>
-#include <gtsam/linear/JacobianFactor.h>
-#include <gtsam/linear/GaussianBayesTree.h>
-#include <gtsam/base/numericalDerivative.h>
+#include <gtsam/nonlinear/ISAM2.h>
+#include <gtsam/slam/SmartProjectionPoseFactor.h>
+#include "examples/SFMdata.h"

 #include <functional>

@@ -36,7 +35,6 @@ using namespace gtsam;

 // Convenience for named keys
 using symbol_shorthand::X;
-using symbol_shorthand::L;

 /* ************************************************************************* */
 TEST(DoglegOptimizer, ComputeBlend) {
@@ -185,6 +183,128 @@ TEST(DoglegOptimizer, Constraint) {
 #endif
 }

+/* ************************************************************************* */
+/**
+ * Test created to fix issue in ISAM2 when using the DogLegOptimizer.
+ * Originally reported by kvmanohar22 in issue #301
+ * https://github.com/borglab/gtsam/issues/301
+ *
+ * This test is based on a script provided by kvmanohar22
+ * to help reproduce the issue.
+ */
+TEST(DogLegOptimizer, VariableUpdate) {
+  // Make the typename short so it looks much cleaner
+  typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
+
+  // create a typedef to the camera type
+  typedef PinholePose<Cal3_S2> Camera;
+  // Define the camera calibration parameters
+  Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
+
+  // Define the camera observation noise model
+  noiseModel::Isotropic::shared_ptr measurementNoise =
+      noiseModel::Isotropic::Sigma(2, 1.0);  // one pixel in u and v
+
+  // Create the set of ground-truth landmarks and poses
+  vector<Point3> points = createPoints();
+  vector<Pose3> poses = createPoses();
+
+  // Create a factor graph
+  NonlinearFactorGraph graph;
+
+  ISAM2DoglegParams doglegparams = ISAM2DoglegParams();
+  doglegparams.verbose = false;
+  ISAM2Params isam2_params;
+  isam2_params.evaluateNonlinearError = true;
+  isam2_params.relinearizeThreshold = 0.0;
+  isam2_params.enableRelinearization = true;
+  isam2_params.optimizationParams = doglegparams;
+  isam2_params.relinearizeSkip = 1;
+  ISAM2 isam2(isam2_params);
+
+  // Simulated measurements from each camera pose, adding them to the factor
+  // graph
+  unordered_map<int, SmartFactor::shared_ptr> smart_factors;
+  for (size_t j = 0; j < points.size(); ++j) {
+    // every landmark represent a single landmark, we use shared pointer to init
+    // the factor, and then insert measurements.
+    SmartFactor::shared_ptr smartfactor(new SmartFactor(measurementNoise, K));
+
+    for (size_t i = 0; i < poses.size(); ++i) {
+      // generate the 2D measurement
+      Camera camera(poses[i], K);
+      Point2 measurement = camera.project(points[j]);
+
+      // call add() function to add measurement into a single factor, here we
+      // need to add:
+      // 1. the 2D measurement
+      // 2. the corresponding camera's key
+      // 3. camera noise model
+      // 4. camera calibration
+
+      // add only first 3 measurements and update the later measurements
+      // incrementally
+      if (i < 3) smartfactor->add(measurement, i);
+    }
+
+    // insert the smart factor in the graph
+    smart_factors[j] = smartfactor;
+    graph.push_back(smartfactor);
+  }
+
+  // Add a prior on pose x0. This indirectly specifies where the origin is.
+  // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
+  noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas(
+      (Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1)).finished());
+  graph.emplace_shared<PriorFactor<Pose3> >(0, poses[0], noise);
+
+  // Because the structure-from-motion problem has a scale ambiguity, the
+  // problem is still under-constrained. Here we add a prior on the second pose
+  // x1, so this will fix the scale by indicating the distance between x0 and
+  // x1. Because these two are fixed, the rest of the poses will be also be
+  // fixed.
+  graph.emplace_shared<PriorFactor<Pose3> >(1, poses[1],
+                                            noise);  // add directly to graph
+
+  // Create the initial estimate to the solution
+  // Intentionally initialize the variables off from the ground truth
+  Values initialEstimate;
+  Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
+  for (size_t i = 0; i < 3; ++i)
+    initialEstimate.insert(i, poses[i].compose(delta));
+  // initialEstimate.print("Initial Estimates:\n");
+
+  // Optimize the graph and print results
+  isam2.update(graph, initialEstimate);
+  Values result = isam2.calculateEstimate();
+  // result.print("Results:\n");
+
+  // we add new measurements from this pose
+  size_t pose_idx = 3;
+
+  // Now update existing smart factors with new observations
+  for (size_t j = 0; j < points.size(); ++j) {
+    SmartFactor::shared_ptr smartfactor = smart_factors[j];
+
+    // add the 4th measurement
+    Camera camera(poses[pose_idx], K);
+    Point2 measurement = camera.project(points[j]);
+    smartfactor->add(measurement, pose_idx);
+  }
+
+  graph.resize(0);
+  initialEstimate.clear();
+
+  // update initial estimate for the new pose
+  initialEstimate.insert(pose_idx, poses[pose_idx].compose(delta));
+
+  // this should break the system
+  isam2.update(graph, initialEstimate);
+  result = isam2.calculateEstimate();
+  EXPECT(std::find(result.keys().begin(), result.keys().end(), pose_idx) !=
+         result.keys().end());
+}
+
 /* ************************************************************************* */
 int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
 /* ************************************************************************* */
@@ -994,6 +994,56 @@ TEST(ISAM2, calculate_nnz)
   EXPECT_LONGS_EQUAL(expected, actual);
 }

+class FixActiveFactor : public NoiseModelFactorN<Vector2> {
+  using Base = NoiseModelFactorN<Vector2>;
+  bool is_active_;
+
+ public:
+  FixActiveFactor(const gtsam::Key& key, const bool active)
+      : Base(nullptr, key), is_active_(active) {}
+
+  virtual bool active(const gtsam::Values &values) const override {
+    return is_active_;
+  }
+
+  virtual Vector
+  evaluateError(const Vector2& x,
+                Base::OptionalMatrixTypeT<Vector2> H = nullptr) const override {
+    if (H) {
+      *H = Vector2::Identity();
+    }
+    return Vector2::Zero();
+  }
+};
+
+TEST(ActiveFactorTesting, Issue1596) {
+  // Issue1596: When a derived Nonlinear Factor is not active, the linearization returns a nullptr (NonlinearFactor.cpp:156), which
+  // causes an error when `EliminateSymbolic` is called (SymbolicFactor-inst.h:45) due to not checking if the factor is nullptr.
+  const gtsam::Key key{Symbol('x', 0)};
+
+  ISAM2 isam;
+  Values values;
+  NonlinearFactorGraph graph;
+
+  // Insert an active factor
+  values.insert<Vector2>(key, Vector2::Zero());
+  graph.emplace_shared<FixActiveFactor>(key, true);
+
+  // No problem here
+  isam.update(graph, values);
+
+  graph = NonlinearFactorGraph();
+
+  // Inserting a factor that is never active
+  graph.emplace_shared<FixActiveFactor>(key, false);
+
+  // This call throws the error if the pointer is not validated on (SymbolicFactor-inst.h:45)
+  isam.update(graph);
+
+  // If the bug is fixed, this line is reached.
+  EXPECT(isam.getFactorsUnsafe().size() == 2);
+}
+
 /* ************************************************************************* */
 int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
 /* ************************************************************************* */