Merge pull request #1198 from borglab/release/4.2a7
commit 77008c056e
@@ -17,3 +17,5 @@
 # for QtCreator:
 CMakeLists.txt.user*
 xcode/
+/Dockerfile
+/python/gtsam/notebooks/.ipynb_checkpoints/ellipses-checkpoint.ipynb

@@ -10,7 +10,7 @@ endif()
 set (GTSAM_VERSION_MAJOR 4)
 set (GTSAM_VERSION_MINOR 2)
 set (GTSAM_VERSION_PATCH 0)
-set (GTSAM_PRERELEASE_VERSION "a6")
+set (GTSAM_PRERELEASE_VERSION "a7")
 math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
 
 if (${GTSAM_VERSION_PATCH} EQUAL 0)

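For reference, with the values above the numeric version works out to 10000*4 + 100*2 + 0 = 40200; only the prerelease tag changes from a6 to a7.
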
@@ -16,6 +16,7 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
 
 set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
 set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
+set(Python_VERSION_PATCH ${PYTHON_VERSION_PATCH})
 set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
 
 else()

@@ -31,11 +32,12 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
 
 set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
 set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})
+set(Python_VERSION_PATCH ${Python3_VERSION_PATCH})
 
 endif()
 
 set(GTSAM_PYTHON_VERSION
-    "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}"
+    "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}"
     CACHE STRING "The version of Python to build the wrappers against."
     FORCE)
 

@@ -20,6 +20,8 @@
 #pragma once
 #include <gtsam/config.h>      // Configuration from CMake
 #include <Eigen/Dense>
+#include <stdexcept>
+#include <string>
 
 #ifndef OPTIONALJACOBIAN_NOBOOST
 #include <boost/optional.hpp>

@@ -96,6 +98,24 @@ public:
     usurp(dynamic->data());
   }
 
+  /**
+   * @brief Constructor from an Eigen::Ref *value*. Will not usurp if dimension is wrong
+   * @note This is important so we don't overwrite someone else's memory!
+   */
+  template<class MATRIX>
+  OptionalJacobian(Eigen::Ref<MATRIX> dynamic_ref) :
+      map_(nullptr) {
+    if (dynamic_ref.rows() == Rows && dynamic_ref.cols() == Cols && !dynamic_ref.IsRowMajor) {
+      usurp(dynamic_ref.data());
+    } else {
+      throw std::invalid_argument(
+          std::string("OptionalJacobian called with wrong dimensions or "
+                      "storage order.\n"
+                      "Expected: ") +
+          "(" + std::to_string(Rows) + ", " + std::to_string(Cols) + ")");
+    }
+  }
+
 #ifndef OPTIONALJACOBIAN_NOBOOST
 
   /// Constructor with boost::none just makes empty

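A usage sketch of the new Eigen::Ref constructor (illustrative only, not part of the commit; the function name `sketch` is scaffolding): the constructor usurps the referenced memory only when dimensions and storage order match, and throws otherwise.

#include <gtsam/base/OptionalJacobian.h>
#include <Eigen/Dense>

void sketch() {
  Eigen::MatrixXd J(2, 3);
  // Correct shape and column-major storage: H usurps J's memory,
  // so writes through H land in J.
  gtsam::OptionalJacobian<2, 3> H(Eigen::Ref<Eigen::MatrixXd>(J));

  Eigen::MatrixXd wrong(3, 3);
  // Wrong shape: the constructor throws std::invalid_argument rather
  // than silently overwriting someone else's memory.
  // gtsam::OptionalJacobian<2, 3> bad(Eigen::Ref<Eigen::MatrixXd>(wrong));
}
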
@@ -160,7 +160,7 @@ namespace gtsam {
       const typename Base::LabelFormatter& labelFormatter =
           &DefaultFormatter) const {
     auto valueFormatter = [](const double& v) {
-      return (boost::format("%4.4g") % v).str();
+      return (boost::format("%4.8g") % v).str();
     };
     Base::print(s, labelFormatter, valueFormatter);
   }

@@ -59,33 +59,41 @@ namespace gtsam {
     /** constant stored in this leaf */
     Y constant_;
 
-    /** Constructor from constant */
-    Leaf(const Y& constant) :
-        constant_(constant) {}
+    /** The number of assignments contained within this leaf.
+     * Particularly useful when leaves have been pruned.
+     */
+    size_t nrAssignments_;
 
-    /** return the constant */
+    /// Constructor from constant
+    Leaf(const Y& constant, size_t nrAssignments = 1)
+        : constant_(constant), nrAssignments_(nrAssignments) {}
+
+    /// Return the constant
     const Y& constant() const {
       return constant_;
     }
 
+    /// Return the number of assignments contained within this leaf.
+    size_t nrAssignments() const { return nrAssignments_; }
+
     /// Leaf-Leaf equality
     bool sameLeaf(const Leaf& q) const override {
       return constant_ == q.constant_;
     }
 
-    /// polymorphic equality: is q is a leaf, could be
+    /// polymorphic equality: is q a leaf and is it the same as this leaf?
    bool sameLeaf(const Node& q) const override {
       return (q.isLeaf() && q.sameLeaf(*this));
     }
 
-    /** equality up to tolerance */
+    /// equality up to tolerance
     bool equals(const Node& q, const CompareFunc& compare) const override {
       const Leaf* other = dynamic_cast<const Leaf*>(&q);
       if (!other) return false;
       return compare(this->constant_, other->constant_);
     }
 
-    /** print */
+    /// print
     void print(const std::string& s, const LabelFormatter& labelFormatter,
                const ValueFormatter& valueFormatter) const override {
       std::cout << s << " Leaf " << valueFormatter(constant_) << std::endl;

@@ -108,14 +116,14 @@ namespace gtsam {
 
     /** apply unary operator */
     NodePtr apply(const Unary& op) const override {
-      NodePtr f(new Leaf(op(constant_)));
+      NodePtr f(new Leaf(op(constant_), nrAssignments_));
       return f;
     }
 
     /// Apply unary operator with assignment
     NodePtr apply(const UnaryAssignment& op,
-                  const Assignment<L>& choices) const override {
-      NodePtr f(new Leaf(op(choices, constant_)));
+                  const Assignment<L>& assignment) const override {
+      NodePtr f(new Leaf(op(assignment, constant_), nrAssignments_));
       return f;
     }
 

@@ -130,7 +138,8 @@ namespace gtsam {
 
     // Applying binary operator to two leaves results in a leaf
     NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
-      NodePtr h(new Leaf(op(fL.constant_, constant_)));  // fL op gL
+      // fL op gL
+      NodePtr h(new Leaf(op(fL.constant_, constant_), nrAssignments_));
       return h;
     }
 

@@ -141,7 +150,7 @@ namespace gtsam {
 
     /** choose a branch, create new memory ! */
     NodePtr choose(const L& label, size_t index) const override {
-      return NodePtr(new Leaf(constant()));
+      return NodePtr(new Leaf(constant(), nrAssignments()));
     }
 
     bool isLeaf() const override { return true; }

@@ -159,7 +168,10 @@ namespace gtsam {
     std::vector<NodePtr> branches_;
 
    private:
-    /** incremental allSame */
+    /**
+     * Incremental allSame.
+     * Records if all the branches are the same leaf.
+     */
     size_t allSame_;
 
     using ChoicePtr = boost::shared_ptr<const Choice>;

@@ -172,15 +184,22 @@ namespace gtsam {
 #endif
     }
 
-    /** If all branches of a choice node f are the same, just return a branch */
+    /// If all branches of a choice node f are the same, just return a branch.
     static NodePtr Unique(const ChoicePtr& f) {
-#ifndef DT_NO_PRUNING
+#ifndef GTSAM_DT_NO_PRUNING
       if (f->allSame_) {
         assert(f->branches().size() > 0);
         NodePtr f0 = f->branches_[0];
         assert(f0->isLeaf());
+
+        size_t nrAssignments = 0;
+        for (auto branch : f->branches()) {
+          assert(branch->isLeaf());
+          nrAssignments +=
+              boost::dynamic_pointer_cast<const Leaf>(branch)->nrAssignments();
+        }
         NodePtr newLeaf(
-            new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant()));
+            new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant(),
+                     nrAssignments));
         return newLeaf;
       } else
 #endif

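An illustrative sketch of what Unique now does (mirrors the NrAssignments unit test further down; the function name `uniqueSketch` is scaffolding): when all branches are identical leaves, they merge into one leaf whose nrAssignments is the sum over the merged branches.

#include <gtsam/discrete/DecisionTree-inl.h>
#include <string>

void uniqueSketch() {
  using DT = gtsam::DecisionTree<std::string, int>;
  const std::pair<std::string, size_t> A("A", 2), B("B", 2), C("C", 2);
  // All eight assignments map to 1, so pruning collapses the whole tree
  // into a single leaf whose nrAssignments() is 2^3 = 8.
  DT tree({A, B, C}, "1 1 1 1 1 1 1 1");
}
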
@@ -189,15 +208,13 @@ namespace gtsam {
 
     bool isLeaf() const override { return false; }
 
-    /** Constructor, given choice label and mandatory expected branch count */
+    /// Constructor, given choice label and mandatory expected branch count.
     Choice(const L& label, size_t count) :
         label_(label), allSame_(true) {
       branches_.reserve(count);
     }
 
-    /**
-     * Construct from applying binary op to two Choice nodes
-     */
+    /// Construct from applying binary op to two Choice nodes.
     Choice(const Choice& f, const Choice& g, const Binary& op) :
         allSame_(true) {
       // Choose what to do based on label

@@ -225,6 +242,7 @@ namespace gtsam {
       }
     }
 
+    /// Return the label of this choice node.
     const L& label() const {
       return label_;
     }

@@ -246,7 +264,7 @@ namespace gtsam {
       branches_.push_back(node);
     }
 
-    /** print (as a tree) */
+    /// print (as a tree).
     void print(const std::string& s, const LabelFormatter& labelFormatter,
                const ValueFormatter& valueFormatter) const override {
       std::cout << s << " Choice(";

@@ -292,7 +310,7 @@ namespace gtsam {
       return (q.isLeaf() && q.sameLeaf(*this));
     }
 
-    /** equality */
+    /// equality
     bool equals(const Node& q, const CompareFunc& compare) const override {
       const Choice* other = dynamic_cast<const Choice*>(&q);
       if (!other) return false;

@@ -305,7 +323,7 @@ namespace gtsam {
       return true;
     }
 
-    /** evaluate */
+    /// evaluate
     const Y& operator()(const Assignment<L>& x) const override {
 #ifndef NDEBUG
       typename Assignment<L>::const_iterator it = x.find(label_);

@@ -320,13 +338,13 @@ namespace gtsam {
       return (*child)(x);
     }
 
-    /**
-     * Construct from applying unary op to a Choice node
-     */
+    /// Construct from applying unary op to a Choice node.
     Choice(const L& label, const Choice& f, const Unary& op) :
         label_(label), allSame_(true) {
       branches_.reserve(f.branches_.size());  // reserve space
-      for (const NodePtr& branch : f.branches_) push_back(branch->apply(op));
+      for (const NodePtr& branch : f.branches_) {
+        push_back(branch->apply(op));
+      }
     }
 
     /**

@@ -337,28 +355,28 @@ namespace gtsam {
      * @param f The original choice node to apply the op on.
      * @param op Function to apply on the choice node. Takes Assignment and
      *    value as arguments.
-     * @param choices The Assignment that will go to op.
+     * @param assignment The Assignment that will go to op.
      */
     Choice(const L& label, const Choice& f, const UnaryAssignment& op,
-           const Assignment<L>& choices)
+           const Assignment<L>& assignment)
         : label_(label), allSame_(true) {
       branches_.reserve(f.branches_.size());  // reserve space
 
-      Assignment<L> choices_ = choices;
+      Assignment<L> assignment_ = assignment;
 
       for (size_t i = 0; i < f.branches_.size(); i++) {
-        choices_[label_] = i;  // Set assignment for label to i
+        assignment_[label_] = i;  // Set assignment for label to i
 
         const NodePtr branch = f.branches_[i];
-        push_back(branch->apply(op, choices_));
+        push_back(branch->apply(op, assignment_));
 
-        // Remove the choice so we are backtracking
-        auto choice_it = choices_.find(label_);
-        choices_.erase(choice_it);
+        // Remove the assignment so we are backtracking
+        auto assignment_it = assignment_.find(label_);
+        assignment_.erase(assignment_it);
       }
     }
 
-    /** apply unary operator */
+    /// apply unary operator.
     NodePtr apply(const Unary& op) const override {
       auto r = boost::make_shared<Choice>(label_, *this, op);
       return Unique(r);

@@ -366,8 +384,8 @@ namespace gtsam {
 
     /// Apply unary operator with assignment
     NodePtr apply(const UnaryAssignment& op,
-                  const Assignment<L>& choices) const override {
-      auto r = boost::make_shared<Choice>(label_, *this, op, choices);
+                  const Assignment<L>& assignment) const override {
+      auto r = boost::make_shared<Choice>(label_, *this, op, assignment);
       return Unique(r);
     }
 

@@ -640,7 +658,7 @@ namespace gtsam {
     // If leaf, apply unary conversion "op" and create a unique leaf.
     using MXLeaf = typename DecisionTree<M, X>::Leaf;
     if (auto leaf = boost::dynamic_pointer_cast<const MXLeaf>(f)) {
-      return NodePtr(new Leaf(Y_of_X(leaf->constant())));
+      return NodePtr(new Leaf(Y_of_X(leaf->constant()), leaf->nrAssignments()));
     }
 
     // Check if Choice

@@ -662,7 +680,16 @@ namespace gtsam {
   }
 
   /****************************************************************************/
-  // Functor performing depth-first visit without Assignment<L> argument.
+  /**
+   * Functor performing depth-first visit to each leaf with the leaf value as
+   * the argument.
+   *
+   * NOTE: We differentiate between leaves and assignments. Concretely, a 3
+   * binary variable tree will have 2^3=8 assignments, but based on pruning, it
+   * can have less than 8 leaves. For example, if a tree has all assignment
+   * values as 1, then pruning will cause the tree to have only 1 leaf yet 8
+   * assignments.
+   */
   template <typename L, typename Y>
   struct Visit {
     using F = std::function<void(const Y&)>;

@@ -691,33 +718,74 @@ namespace gtsam {
   }
 
   /****************************************************************************/
-  // Functor performing depth-first visit with Assignment<L> argument.
+  /**
+   * Functor performing depth-first visit to each leaf with the Leaf object
+   * passed as an argument.
+   *
+   * NOTE: We differentiate between leaves and assignments. Concretely, a 3
+   * binary variable tree will have 2^3=8 assignments, but based on pruning, it
+   * can have <8 leaves. For example, if a tree has all assignment values as 1,
+   * then pruning will cause the tree to have only 1 leaf yet 8 assignments.
+   */
+  template <typename L, typename Y>
+  struct VisitLeaf {
+    using F = std::function<void(const typename DecisionTree<L, Y>::Leaf&)>;
+    explicit VisitLeaf(F f) : f(f) {}  ///< Construct from folding function.
+    F f;                               ///< folding function object.
+
+    /// Do a depth-first visit on the tree rooted at node.
+    void operator()(const typename DecisionTree<L, Y>::NodePtr& node) const {
+      using Leaf = typename DecisionTree<L, Y>::Leaf;
+      if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
+        return f(*leaf);
+
+      using Choice = typename DecisionTree<L, Y>::Choice;
+      auto choice = boost::dynamic_pointer_cast<const Choice>(node);
+      if (!choice)
+        throw std::invalid_argument("DecisionTree::VisitLeaf: Invalid NodePtr");
+      for (auto&& branch : choice->branches()) (*this)(branch);  // recurse!
+    }
+  };
+
+  template <typename L, typename Y>
+  template <typename Func>
+  void DecisionTree<L, Y>::visitLeaf(Func f) const {
+    VisitLeaf<L, Y> visit(f);
+    visit(root_);
+  }
+
+  /****************************************************************************/
+  /**
+   * Functor performing depth-first visit to each leaf with the leaf's
+   * `Assignment<L>` and value passed as arguments.
+   *
+   * NOTE: Follows the same pruning semantics as `visit`.
+   */
   template <typename L, typename Y>
   struct VisitWith {
-    using Choices = Assignment<L>;
-    using F = std::function<void(const Choices&, const Y&)>;
+    using F = std::function<void(const Assignment<L>&, const Y&)>;
     explicit VisitWith(F f) : f(f) {}  ///< Construct from folding function.
-    Choices choices;           ///< Assignment, mutating through recursion.
+    Assignment<L> assignment;  ///< Assignment, mutating through recursion.
     F f;                       ///< folding function object.
 
     /// Do a depth-first visit on the tree rooted at node.
     void operator()(const typename DecisionTree<L, Y>::NodePtr& node) {
       using Leaf = typename DecisionTree<L, Y>::Leaf;
       if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
-        return f(choices, leaf->constant());
+        return f(assignment, leaf->constant());
 
       using Choice = typename DecisionTree<L, Y>::Choice;
       auto choice = boost::dynamic_pointer_cast<const Choice>(node);
       if (!choice)
        throw std::invalid_argument("DecisionTree::VisitWith: Invalid NodePtr");
       for (size_t i = 0; i < choice->nrChoices(); i++) {
-        choices[choice->label()] = i;  // Set assignment for label to i
+        assignment[choice->label()] = i;  // Set assignment for label to i
 
         (*this)(choice->branches()[i]);  // recurse!
 
         // Remove the choice so we are backtracking
-        auto choice_it = choices.find(choice->label());
-        choices.erase(choice_it);
+        auto choice_it = assignment.find(choice->label());
+        assignment.erase(choice_it);
       }
     }
   };

@@ -747,12 +815,26 @@ namespace gtsam {
   }
 
   /****************************************************************************/
-  // labels is just done with a visit
+  /**
+   * Get (partial) labels by performing a visit.
+   *
+   * This method performs a depth-first search to go to every leaf and records
+   * the keys assignment which leads to that leaf. Since the tree can be pruned,
+   * there might be a leaf at a lower depth which results in a partial
+   * assignment (i.e. not all keys are specified).
+   *
+   * E.g. given a tree with 3 keys, there may be a branch where the 3rd key has
+   * the same values for all the leaves. This leads to the branch being pruned
+   * so we get a leaf which is arrived at by just the first 2 keys and their
+   * assignments.
+   */
   template <typename L, typename Y>
   std::set<L> DecisionTree<L, Y>::labels() const {
     std::set<L> unique;
-    auto f = [&](const Assignment<L>& choices, const Y&) {
-      for (auto&& kv : choices) unique.insert(kv.first);
+    auto f = [&](const Assignment<L>& assignment, const Y&) {
+      for (auto&& kv : assignment) {
+        unique.insert(kv.first);
+      }
     };
     visitWith(f);
     return unique;

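A hedged sketch of the partial-assignment behavior just described (illustration only; `labelsSketch` is scaffolding): in the extreme case where every leaf is equal, the tree collapses to a single leaf reached by the empty assignment, so labels() sees no keys at all.

#include <gtsam/discrete/DecisionTree-inl.h>
#include <set>
#include <string>

void labelsSketch() {
  using DT = gtsam::DecisionTree<std::string, int>;
  const std::pair<std::string, size_t> A("A", 2), B("B", 2), C("C", 2);
  // All leaves equal, so pruning collapses the tree to one leaf; the
  // depth-first visit then records an empty assignment for it.
  DT tree({A, B, C}, "1 1 1 1 1 1 1 1");
  std::set<std::string> keys = tree.labels();  // empty set here
}
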
@@ -801,8 +883,8 @@ namespace gtsam {
       throw std::runtime_error(
           "DecisionTree::apply(unary op) undefined for empty tree.");
     }
-    Assignment<L> choices;
-    return DecisionTree(root_->apply(op, choices));
+    Assignment<L> assignment;
+    return DecisionTree(root_->apply(op, assignment));
   }
 
   /****************************************************************************/

@@ -105,7 +105,7 @@ namespace gtsam {
     virtual const Y& operator()(const Assignment<L>& x) const = 0;
     virtual Ptr apply(const Unary& op) const = 0;
     virtual Ptr apply(const UnaryAssignment& op,
-                      const Assignment<L>& choices) const = 0;
+                      const Assignment<L>& assignment) const = 0;
     virtual Ptr apply_f_op_g(const Node&, const Binary&) const = 0;
     virtual Ptr apply_g_op_fL(const Leaf&, const Binary&) const = 0;
     virtual Ptr apply_g_op_fC(const Choice&, const Binary&) const = 0;

@@ -153,7 +153,7 @@ namespace gtsam {
     /** Create a constant */
     explicit DecisionTree(const Y& y);
 
-    /** Create a new leaf function splitting on a variable */
+    /// Create tree with 2 assignments `y1`, `y2`, splitting on variable `label`
     DecisionTree(const L& label, const Y& y1, const Y& y2);
 
     /** Allow Label+Cardinality for convenience */

@@ -219,9 +219,8 @@ namespace gtsam {
     /// @name Standard Interface
     /// @{
 
-    /** Make virtual */
-    virtual ~DecisionTree() {
-    }
+    /// Make virtual
+    virtual ~DecisionTree() {}
 
     /// Check if tree is empty.
     bool empty() const { return !root_; }

@@ -235,9 +234,11 @@ namespace gtsam {
     /**
     * @brief Visit all leaves in depth-first fashion.
    *
-    * @param f side-effect taking a value.
+    * @param f (side-effect) Function taking a value.
     *
-    * @note Due to pruning, leaves might not exhaust choices.
+    * @note Due to pruning, the number of leaves may not be the same as the
+    * number of assignments. E.g. if we have a tree on 2 binary variables with
+    * all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
     *
     * Example:
     *   int sum = 0;

@@ -250,13 +251,32 @@ namespace gtsam {
     /**
     * @brief Visit all leaves in depth-first fashion.
     *
-    * @param f side-effect taking an assignment and a value.
+    * @param f (side-effect) Function taking the leaf node pointer.
     *
-    * @note Due to pruning, leaves might not exhaust choices.
+    * @note Due to pruning, the number of leaves may not be the same as the
+    * number of assignments. E.g. if we have a tree on 2 binary variables with
+    * all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
     *
     * Example:
     *   int sum = 0;
-    *   auto visitor = [&](const Assignment<L>& choices, int y) { sum += y; };
+    *   auto visitor = [&](int y) { sum += y; };
     *   tree.visitWith(visitor);
     */
+    template <typename Func>
+    void visitLeaf(Func f) const;
+
+    /**
+    * @brief Visit all leaves in depth-first fashion.
+    *
+    * @param f (side-effect) Function taking an assignment and a value.
+    *
+    * @note Due to pruning, the number of leaves may not be the same as the
+    * number of assignments. E.g. if we have a tree on 2 binary variables with
+    * all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
+    *
+    * Example:
+    *   int sum = 0;
+    *   auto visitor = [&](const Assignment<L>& assignment, int y) { sum += y; };
+    *   tree.visitWith(visitor);
+    */
     template <typename Func>

@@ -286,5 +286,43 @@ namespace gtsam {
         AlgebraicDecisionTree<Key>(keys, table),
         cardinalities_(keys.cardinalities()) {}
 
+  /* ************************************************************************ */
+  DecisionTreeFactor DecisionTreeFactor::prune(size_t maxNrAssignments) const {
+    const size_t N = maxNrAssignments;
+
+    // Get the probabilities in the decision tree so we can threshold.
+    std::vector<double> probabilities;
+    this->visitLeaf([&](const Leaf& leaf) {
+      size_t nrAssignments = leaf.nrAssignments();
+      double prob = leaf.constant();
+      probabilities.insert(probabilities.end(), nrAssignments, prob);
+    });
+
+    // The number of probabilities can be lower than max_leaves
+    if (probabilities.size() <= N) {
+      return *this;
+    }
+
+    std::sort(probabilities.begin(), probabilities.end(),
+              std::greater<double>{});
+
+    double threshold = probabilities[N - 1];
+
+    // Now threshold the decision tree
+    size_t total = 0;
+    auto thresholdFunc = [threshold, &total, N](const double& value) {
+      if (value < threshold || total >= N) {
+        return 0.0;
+      } else {
+        total += 1;
+        return value;
+      }
+    };
+    DecisionTree<Key, double> thresholded(*this, thresholdFunc);
+
+    // Create pruned decision tree factor and return.
+    return DecisionTreeFactor(this->discreteKeys(), thresholded);
+  }
+
   /* ************************************************************************ */
 }  // namespace gtsam

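A usage sketch drawn directly from the Prune unit test later in this diff (the wrapper function `pruneSketch` is scaffolding): pruned leaves become 0, everything in the top-N survives.

#include <gtsam/discrete/DecisionTreeFactor.h>

void pruneSketch() {
  gtsam::DiscreteKey A(1, 2), B(2, 2), C(3, 2);
  gtsam::DecisionTreeFactor f(A & B & C, "1 5 3 7 2 6 4 8");
  // Keep the 5 highest-valued assignments; everything else becomes 0,
  // i.e. the result is equivalent to (A & B & C, "0 5 0 7 0 6 4 8").
  gtsam::DecisionTreeFactor pruned = f.prune(5);
}
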
@@ -170,6 +170,26 @@ namespace gtsam {
     /// Return all the discrete keys associated with this factor.
     DiscreteKeys discreteKeys() const;
 
+    /**
+     * @brief Prune the decision tree of discrete variables.
+     *
+     * Pruning will set the leaves to be "pruned" to 0 indicating a 0
+     * probability. An assignment is pruned if it is not in the top
+     * `maxNrAssignments` values.
+     *
+     * A violation can occur if there are more
+     * duplicate values than `maxNrAssignments`. A violation here is the need to
+     * un-prune the decision tree (e.g. all assignment values are 1.0). We could
+     * have another case where some subset of duplicates exist (e.g. for a tree
+     * with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is
+     * not a violation since for `maxNrAssignments=5` the top values are (1,
+     * 0.8).
+     *
+     * @param maxNrAssignments The maximum number of assignments to keep.
+     * @return DecisionTreeFactor
+     */
+    DecisionTreeFactor prune(size_t maxNrAssignments) const;
+
     /// @}
     /// @name Wrapper support
     /// @{

@@ -20,7 +20,7 @@
 #include <gtsam/discrete/DiscreteKey.h>  // make sure we have traits
 #include <gtsam/discrete/DiscreteValues.h>
 // headers first to make sure no missing headers
-//#define DT_NO_PRUNING
+//#define GTSAM_DT_NO_PRUNING
 #include <gtsam/discrete/AlgebraicDecisionTree.h>
 #include <gtsam/discrete/DecisionTree-inl.h>  // for convert only
 #define DISABLE_TIMING

@@ -18,7 +18,7 @@
 */
 
 // #define DT_DEBUG_MEMORY
-// #define DT_NO_PRUNING
+// #define GTSAM_DT_NO_PRUNING
 #define DISABLE_DOT
 #include <gtsam/discrete/DecisionTree-inl.h>
 

@@ -323,6 +323,49 @@ TEST(DecisionTree, Containers) {
   StringContainerTree converted(stringIntTree, container_of_int);
 }
 
+/* ************************************************************************** */
+// Test nrAssignments.
+TEST(DecisionTree, NrAssignments) {
+  pair<string, size_t> A("A", 2), B("B", 2), C("C", 2);
+  DT tree({A, B, C}, "1 1 1 1 1 1 1 1");
+  EXPECT(tree.root_->isLeaf());
+  auto leaf = boost::dynamic_pointer_cast<const DT::Leaf>(tree.root_);
+  EXPECT_LONGS_EQUAL(8, leaf->nrAssignments());
+
+  DT tree2({C, B, A}, "1 1 1 2 3 4 5 5");
+  /* The tree is
+    Choice(C)
+    0 Choice(B)
+    0 0 Leaf 1
+    0 1 Choice(A)
+    0 1 0 Leaf 1
+    0 1 1 Leaf 2
+    1 Choice(B)
+    1 0 Choice(A)
+    1 0 0 Leaf 3
+    1 0 1 Leaf 4
+    1 1 Leaf 5
+  */
+
+  auto root = boost::dynamic_pointer_cast<const DT::Choice>(tree2.root_);
+  CHECK(root);
+  auto choice0 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[0]);
+  CHECK(choice0);
+  EXPECT(choice0->branches()[0]->isLeaf());
+  auto choice00 = boost::dynamic_pointer_cast<const DT::Leaf>(choice0->branches()[0]);
+  CHECK(choice00);
+  EXPECT_LONGS_EQUAL(2, choice00->nrAssignments());
+
+  auto choice1 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[1]);
+  CHECK(choice1);
+  auto choice10 = boost::dynamic_pointer_cast<const DT::Choice>(choice1->branches()[0]);
+  CHECK(choice10);
+  auto choice11 = boost::dynamic_pointer_cast<const DT::Leaf>(choice1->branches()[1]);
+  CHECK(choice11);
+  EXPECT(choice11->isLeaf());
+  EXPECT_LONGS_EQUAL(2, choice11->nrAssignments());
+}
+
 /* ************************************************************************** */
 // Test visit.
 TEST(DecisionTree, visit) {

@@ -106,6 +106,41 @@ TEST(DecisionTreeFactor, enumerate) {
   EXPECT(actual == expected);
 }
 
+/* ************************************************************************* */
+// Check pruning of the decision tree works as expected.
+TEST(DecisionTreeFactor, Prune) {
+  DiscreteKey A(1, 2), B(2, 2), C(3, 2);
+  DecisionTreeFactor f(A & B & C, "1 5 3 7 2 6 4 8");
+
+  // Only keep the leaves with the top 5 values.
+  size_t maxNrAssignments = 5;
+  auto pruned5 = f.prune(maxNrAssignments);
+
+  // Pruned leaves should be 0
+  DecisionTreeFactor expected(A & B & C, "0 5 0 7 0 6 4 8");
+  EXPECT(assert_equal(expected, pruned5));
+
+  // Check for more extreme pruning where we only keep the top 2 leaves
+  maxNrAssignments = 2;
+  auto pruned2 = f.prune(maxNrAssignments);
+  DecisionTreeFactor expected2(A & B & C, "0 0 0 7 0 0 0 8");
+  EXPECT(assert_equal(expected2, pruned2));
+
+  DiscreteKey D(4, 2);
+  DecisionTreeFactor factor(
+      D & C & B & A,
+      "0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 "
+      "0.0 0.0 0.99995287 1.0 1.0 1.0 1.0");
+
+  DecisionTreeFactor expected3(
+      D & C & B & A,
+      "0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 "
+      "0.999952870000 1.0 1.0 1.0 1.0");
+  maxNrAssignments = 5;
+  auto pruned3 = factor.prune(maxNrAssignments);
+  EXPECT(assert_equal(expected3, pruned3));
+}
+
 /* ************************************************************************* */
 TEST(DecisionTreeFactor, DotWithNames) {
   DiscreteKey A(12, 3), B(5, 2);

@@ -194,4 +229,3 @@ int main() {
   return TestRegistry::runAllTests(tr);
 }
 /* ************************************************************************* */
-

@@ -415,16 +415,16 @@ TEST(DiscreteFactorGraph, DotWithNames) {
       "graph {\n"
       "  size=\"5,5\";\n"
       "\n"
-      "  varC[label=\"C\"];\n"
-      "  varA[label=\"A\"];\n"
-      "  varB[label=\"B\"];\n"
+      "  var0[label=\"C\"];\n"
+      "  var1[label=\"A\"];\n"
+      "  var2[label=\"B\"];\n"
       "\n"
       "  factor0[label=\"\", shape=point];\n"
-      "  varC--factor0;\n"
-      "  varA--factor0;\n"
+      "  var0--factor0;\n"
+      "  var1--factor0;\n"
       "  factor1[label=\"\", shape=point];\n"
-      "  varC--factor1;\n"
-      "  varB--factor1;\n"
+      "  var0--factor1;\n"
+      "  var2--factor1;\n"
       "}\n";
   EXPECT(actual == expected);
 }

@@ -113,6 +113,18 @@ list<Point2> circleCircleIntersection(Point2 c1, double r1, Point2 c2,
   return circleCircleIntersection(c1, c2, fh);
 }
 
+Point2Pair means(const std::vector<Point2Pair> &abPointPairs) {
+  const size_t n = abPointPairs.size();
+  if (n == 0) throw std::invalid_argument("Point2::mean input Point2Pair vector is empty");
+  Point2 aSum(0, 0), bSum(0, 0);
+  for (const Point2Pair &abPair : abPointPairs) {
+    aSum += abPair.first;
+    bSum += abPair.second;
+  }
+  const double f = 1.0 / n;
+  return {aSum * f, bSum * f};
+}
+
 /* ************************************************************************* */
 ostream &operator<<(ostream &os, const gtsam::Point2Pair &p) {
   os << p.first << " <-> " << p.second;

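A quick usage sketch for the new means() helper (illustration; `meansSketch` is scaffolding): it averages the a-side and b-side points in one pass.

#include <gtsam/geometry/Point2.h>
#include <vector>

void meansSketch() {
  std::vector<gtsam::Point2Pair> pairs;
  pairs.emplace_back(gtsam::Point2(0, 0), gtsam::Point2(1, 1));
  pairs.emplace_back(gtsam::Point2(2, 0), gtsam::Point2(3, 1));
  // centroids.first == (1, 0), centroids.second == (2, 1)
  gtsam::Point2Pair centroids = gtsam::means(pairs);
}
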
@@ -72,6 +72,9 @@ GTSAM_EXPORT boost::optional<Point2> circleCircleIntersection(double R_d, double
 */
 GTSAM_EXPORT std::list<Point2> circleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh);
 
+/// Calculate the two means of a set of Point2 pairs
+GTSAM_EXPORT Point2Pair means(const std::vector<Point2Pair> &abPointPairs);
+
 /**
  * @brief Intersect 2 circles
  * @param c1 center of first circle

@@ -309,54 +309,77 @@ double Pose2::range(const Pose2& pose,
 }
 
 /* *************************************************************************
  * New explanation, from scan.ml
- * It finds the angle using a linear method:
- * q = Pose2::transformFrom(p) = t + R*p
+ * Align finds the angle using a linear method:
+ * a = Pose2::transformFrom(b) = t + R*b
  * We need to remove the centroids from the data to find the rotation
- * using dp=[dpx;dpy] and q=[dqx;dqy] we have
- *  |dqx|   |c -s|   |dpx|   |dpx -dpy|   |c|
+ * using db=[dbx;dby] and a=[dax;day] we have
+ *  |dax|   |c -s|   |dbx|   |dbx -dby|   |c|
  *  |   | = |    | * |   | = |        | * | | = H_i*cs
- *  |dqy|   |s  c|   |dpy|   |dpy  dpx|   |s|
+ *  |day|   |s  c|   |dby|   |dby  dbx|   |s|
  * where the Hi are the 2*2 matrices. Then we will minimize the criterion
- * J = \sum_i norm(q_i - H_i * cs)
+ * J = \sum_i norm(a_i - H_i * cs)
 * Taking the derivative with respect to cs and setting to zero we have
- * cs = (\sum_i H_i' * q_i)/(\sum H_i'*H_i)
+ * cs = (\sum_i H_i' * a_i)/(\sum H_i'*H_i)
 * The hessian is diagonal and just divides by a constant, but this
 * normalization constant is irrelevant, since we take atan2.
- * i.e., cos ~ sum(dpx*dqx + dpy*dqy) and sin ~ sum(-dpy*dqx + dpx*dqy)
+ * i.e., cos ~ sum(dbx*dax + dby*day) and sin ~ sum(-dby*dax + dbx*day)
 * The translation is then found from the centroids
- * as they also satisfy cq = t + R*cp, hence t = cq - R*cp
+ * as they also satisfy ca = t + R*cb, hence t = ca - R*cb
 */
 
-boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
-
-  size_t n = pairs.size();
-  if (n<2) return boost::none;  // we need at least two pairs
+boost::optional<Pose2> Pose2::Align(const Point2Pairs &ab_pairs) {
+  const size_t n = ab_pairs.size();
+  if (n < 2) {
+    return boost::none;  // we need at least 2 pairs
+  }
 
   // calculate centroids
-  Point2 cp(0,0), cq(0,0);
-  for(const Point2Pair& pair: pairs) {
-    cp += pair.first;
-    cq += pair.second;
+  Point2 ca(0, 0), cb(0, 0);
+  for (const Point2Pair& pair : ab_pairs) {
+    ca += pair.first;
+    cb += pair.second;
   }
-  double f = 1.0/n;
-  cp *= f; cq *= f;
+  const double f = 1.0/n;
+  ca *= f;
+  cb *= f;
 
   // calculate cos and sin
   double c = 0, s = 0;
-  for(const Point2Pair& pair: pairs) {
-    Point2 dp = pair.first - cp;
-    Point2 dq = pair.second - cq;
-    c +=  dp.x() * dq.x() + dp.y() * dq.y();
-    s += -dp.y() * dq.x() + dp.x() * dq.y();
+  for (const Point2Pair& pair : ab_pairs) {
+    Point2 da = pair.first - ca;
+    Point2 db = pair.second - cb;
+    c +=  db.x() * da.x() + db.y() * da.y();
+    s += -db.y() * da.x() + db.x() * da.y();
   }
 
   // calculate angle and translation
-  double theta = atan2(s,c);
-  Rot2 R = Rot2::fromAngle(theta);
-  Point2 t = cq - R*cp;
+  const double theta = atan2(s, c);
+  const Rot2 R = Rot2::fromAngle(theta);
+  const Point2 t = ca - R*cb;
   return Pose2(R, t);
 }
 
+boost::optional<Pose2> Pose2::Align(const Matrix& a, const Matrix& b) {
+  if (a.rows() != 2 || b.rows() != 2 || a.cols() != b.cols()) {
+    throw std::invalid_argument(
+        "Pose2:Align expects 2*N matrices of equal shape.");
+  }
+  Point2Pairs ab_pairs;
+  for (Eigen::Index j = 0; j < a.cols(); j++) {
+    ab_pairs.emplace_back(a.col(j), b.col(j));
+  }
+  return Pose2::Align(ab_pairs);
+}
+
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
+boost::optional<Pose2> align(const Point2Pairs& ba_pairs) {
+  Point2Pairs ab_pairs;
+  for (const Point2Pair &baPair : ba_pairs) {
+    ab_pairs.emplace_back(baPair.second, baPair.first);
+  }
+  return Pose2::Align(ab_pairs);
+}
+#endif
+
 /* ************************************************************************* */
 } // namespace gtsam

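A hedged usage sketch of the new static Pose2::Align (illustration, not from the commit; `alignSketch` is scaffolding). Note the pair order was reversed relative to the deprecated free function: each pair is (a_point, b_point) with a_point = aTb * b_point.

#include <gtsam/geometry/Pose2.h>

void alignSketch() {
  const gtsam::Pose2 aTb(gtsam::Rot2::fromAngle(0.5), gtsam::Point2(1.0, 2.0));
  gtsam::Point2Pairs ab_pairs;
  for (const gtsam::Point2& b : {gtsam::Point2(0, 0), gtsam::Point2(1, 0),
                                 gtsam::Point2(0, 1)}) {
    // (a_point, b_point) convention: a = aTb * b.
    ab_pairs.emplace_back(aTb.transformFrom(b), b);
  }
  // For noiseless input the estimate should match aTb.
  boost::optional<gtsam::Pose2> estimate = gtsam::Pose2::Align(ab_pairs);
}
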
@@ -92,6 +92,18 @@ public:
     *this = Expmap(v);
   }
 
+  /**
+   * Create Pose2 by aligning two point pairs
+   * A pose aTb is estimated between pairs (a_point, b_point) such that
+   * a_point = aTb * b_point
+   * Note this allows for noise on the points but in that case the mapping
+   * will not be exact.
+   */
+  static boost::optional<Pose2> Align(const Point2Pairs& abPointPairs);
+
+  // Version of Pose2::Align that takes 2 matrices.
+  static boost::optional<Pose2> Align(const Matrix& a, const Matrix& b);
+
   /// @}
   /// @name Testable
   /// @{

@@ -331,12 +343,19 @@ inline Matrix wedge<Pose2>(const Vector& xi) {
   return Matrix(Pose2::wedge(xi(0),xi(1),xi(2))).eval();
 }
 
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
 /**
+ * @deprecated Use static constructor (with reversed pairs!)
  * Calculate pose between a vector of 2D point correspondences (p,q)
  * where q = Pose2::transformFrom(p) = t + R*p
  */
-typedef std::pair<Point2,Point2> Point2Pair;
-GTSAM_EXPORT boost::optional<Pose2> align(const std::vector<Point2Pair>& pairs);
+GTSAM_EXPORT boost::optional<Pose2>
+GTSAM_DEPRECATED align(const Point2Pairs& pairs);
+#endif
+
+// Convenience typedef
+using Pose2Pair = std::pair<Pose2, Pose2>;
+using Pose2Pairs = std::vector<Pose2Pair>;
 
 template <>
 struct traits<Pose2> : public internal::LieGroup<Pose2> {};

@@ -473,12 +473,13 @@ boost::optional<Pose3> Pose3::Align(const Matrix& a, const Matrix& b) {
         "Pose3:Align expects 3*N matrices of equal shape.");
   }
   Point3Pairs abPointPairs;
-  for (size_t j=0; j < a.cols(); j++) {
+  for (Eigen::Index j = 0; j < a.cols(); j++) {
     abPointPairs.emplace_back(a.col(j), b.col(j));
   }
   return Pose3::Align(abPointPairs);
 }
 
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
 boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
   Point3Pairs abPointPairs;
   for (const Point3Pair &baPair : baPointPairs) {

@@ -486,6 +487,7 @@ boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
   }
   return Pose3::Align(abPointPairs);
 }
+#endif
 
 /* ************************************************************************* */
 std::ostream &operator<<(std::ostream &os, const Pose3& pose) {

@@ -129,6 +129,19 @@ Rot2 Rot2::relativeBearing(const Point2& d, OptionalJacobian<1, 2> H) {
   }
 }
 
+/* ************************************************************************* */
+Rot2 Rot2::ClosestTo(const Matrix2& M) {
+  Eigen::JacobiSVD<Matrix2> svd(M, Eigen::ComputeFullU | Eigen::ComputeFullV);
+  const Matrix2& U = svd.matrixU();
+  const Matrix2& V = svd.matrixV();
+  const double det = (U * V.transpose()).determinant();
+  Matrix2 M_prime = (U * Vector2(1, det).asDiagonal() * V.transpose());
+
+  double c = M_prime(0, 0);
+  double s = M_prime(1, 0);
+  return Rot2::fromCosSin(c, s);
+}
+
 /* ************************************************************************* */
 
 } // gtsam

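A hedged sketch of the new Rot2::ClosestTo (illustration only; `closestToSketch` is scaffolding): the SVD-based projection returns the rotation nearest to an arbitrary 2x2 matrix, with determinant forced to +1.

#include <gtsam/geometry/Rot2.h>

void closestToSketch() {
  gtsam::Matrix2 M;
  M << 0.99, -0.12,
       0.11,  1.01;  // approximately, but not exactly, a rotation
  // Projection onto SO(2) in the Frobenius sense.
  gtsam::Rot2 R = gtsam::Rot2::ClosestTo(M);
}
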
@@ -14,6 +14,7 @@
  * @brief 2D rotation
 * @date Dec 9, 2009
 * @author Frank Dellaert
+ * @author John Lambert
 */
 
 #pragma once

@@ -209,6 +210,9 @@ namespace gtsam {
   /** return 2*2 transpose (inverse) rotation matrix */
   Matrix2 transpose() const;
 
+  /** Find closest valid rotation matrix, given a 2x2 matrix */
+  static Rot2 ClosestTo(const Matrix2& M);
+
  private:
   /** Serialization function */
   friend class boost::serialization::access;

@@ -0,0 +1,242 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file   Similarity2.cpp
+ * @brief  Implementation of Similarity2 transform
+ * @author John Lambert, Varun Agrawal
+ */
+
+#include <gtsam/base/Manifold.h>
+#include <gtsam/geometry/Pose2.h>
+#include <gtsam/geometry/Rot3.h>
+#include <gtsam/geometry/Similarity2.h>
+#include <gtsam/slam/KarcherMeanFactor-inl.h>
+
+namespace gtsam {
+
+using std::vector;
+
+namespace internal {
+
+/// Subtract centroids from point pairs.
+static Point2Pairs SubtractCentroids(const Point2Pairs& abPointPairs,
+                                     const Point2Pair& centroids) {
+  Point2Pairs d_abPointPairs;
+  for (const Point2Pair& abPair : abPointPairs) {
+    Point2 da = abPair.first - centroids.first;
+    Point2 db = abPair.second - centroids.second;
+    d_abPointPairs.emplace_back(da, db);
+  }
+  return d_abPointPairs;
+}
+
+/// Form inner products x and y and calculate scale.
+static double CalculateScale(const Point2Pairs& d_abPointPairs,
+                             const Rot2& aRb) {
+  double x = 0, y = 0;
+  Point2 da, db;
+
+  for (const Point2Pair& d_abPair : d_abPointPairs) {
+    std::tie(da, db) = d_abPair;
+    const Vector2 da_prime = aRb * db;
+    y += da.transpose() * da_prime;
+    x += da_prime.transpose() * da_prime;
+  }
+  const double s = y / x;
+  return s;
+}
+
+/// Form outer product H.
+static Matrix2 CalculateH(const Point2Pairs& d_abPointPairs) {
+  Matrix2 H = Z_2x2;
+  for (const Point2Pair& d_abPair : d_abPointPairs) {
+    H += d_abPair.first * d_abPair.second.transpose();
+  }
+  return H;
+}
+
+/**
+ * @brief This method estimates the similarity transform from differences point
+ * pairs, given a known or estimated rotation and point centroids.
+ *
+ * @param d_abPointPairs
+ * @param aRb
+ * @param centroids
+ * @return Similarity2
+ */
+static Similarity2 Align(const Point2Pairs& d_abPointPairs, const Rot2& aRb,
+                         const Point2Pair& centroids) {
+  const double s = CalculateScale(d_abPointPairs, aRb);
+  // dividing aTb by s is required because the registration cost function
+  // minimizes ||a - sRb - t||, whereas Sim(2) computes s(Rb + t)
+  const Point2 aTb = (centroids.first - s * (aRb * centroids.second)) / s;
+  return Similarity2(aRb, aTb, s);
+}
+
+/**
+ * @brief This method estimates the similarity transform from point pairs,
+ * given a known or estimated rotation.
+ * Refer to:
+ * http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
+ * Chapter 3
+ *
+ * @param abPointPairs
+ * @param aRb
+ * @return Similarity2
+ */
+static Similarity2 AlignGivenR(const Point2Pairs& abPointPairs,
+                               const Rot2& aRb) {
+  auto centroids = means(abPointPairs);
+  auto d_abPointPairs = internal::SubtractCentroids(abPointPairs, centroids);
+  return internal::Align(d_abPointPairs, aRb, centroids);
+}
+}  // namespace internal
+
+Similarity2::Similarity2() : t_(0, 0), s_(1) {}
+
+Similarity2::Similarity2(double s) : t_(0, 0), s_(s) {}
+
+Similarity2::Similarity2(const Rot2& R, const Point2& t, double s)
+    : R_(R), t_(t), s_(s) {}
+
+Similarity2::Similarity2(const Matrix2& R, const Vector2& t, double s)
+    : R_(Rot2::ClosestTo(R)), t_(t), s_(s) {}
+
+Similarity2::Similarity2(const Matrix3& T)
+    : R_(Rot2::ClosestTo(T.topLeftCorner<2, 2>())),
+      t_(T.topRightCorner<2, 1>()),
+      s_(1.0 / T(2, 2)) {}
+
+bool Similarity2::equals(const Similarity2& other, double tol) const {
+  return R_.equals(other.R_, tol) &&
+         traits<Point2>::Equals(t_, other.t_, tol) && s_ < (other.s_ + tol) &&
+         s_ > (other.s_ - tol);
+}
+
+bool Similarity2::operator==(const Similarity2& other) const {
+  return R_.matrix() == other.R_.matrix() && t_ == other.t_ && s_ == other.s_;
+}
+
+void Similarity2::print(const std::string& s) const {
+  std::cout << std::endl;
+  std::cout << s;
+  rotation().print("\nR:\n");
+  std::cout << "t: " << translation().transpose() << " s: " << scale()
+            << std::endl;
+}
+
+Similarity2 Similarity2::identity() { return Similarity2(); }
+
+Similarity2 Similarity2::operator*(const Similarity2& S) const {
+  return Similarity2(R_ * S.R_, ((1.0 / S.s_) * t_) + R_ * S.t_, s_ * S.s_);
+}
+
+Similarity2 Similarity2::inverse() const {
+  const Rot2 Rt = R_.inverse();
+  const Point2 sRt = Rt * (-s_ * t_);
+  return Similarity2(Rt, sRt, 1.0 / s_);
+}
+
+Point2 Similarity2::transformFrom(const Point2& p) const {
+  const Point2 q = R_ * p + t_;
+  return s_ * q;
+}
+
+Pose2 Similarity2::transformFrom(const Pose2& T) const {
+  Rot2 R = R_.compose(T.rotation());
+  Point2 t = Point2(s_ * (R_ * T.translation() + t_));
+  return Pose2(R, t);
+}
+
+Point2 Similarity2::operator*(const Point2& p) const {
+  return transformFrom(p);
+}
+
+Similarity2 Similarity2::Align(const Point2Pairs& abPointPairs) {
+  // Refer to Chapter 3 of
+  // http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
+  if (abPointPairs.size() < 2)
+    throw std::runtime_error("input should have at least 2 pairs of points");
+  auto centroids = means(abPointPairs);
+  auto d_abPointPairs = internal::SubtractCentroids(abPointPairs, centroids);
+  Matrix2 H = internal::CalculateH(d_abPointPairs);
+  // ClosestTo finds rotation matrix closest to H in Frobenius sense
+  Rot2 aRb = Rot2::ClosestTo(H);
+  return internal::Align(d_abPointPairs, aRb, centroids);
+}
+
+Similarity2 Similarity2::Align(const Pose2Pairs& abPosePairs) {
+  const size_t n = abPosePairs.size();
+  if (n < 2)
+    throw std::runtime_error("input should have at least 2 pairs of poses");
+
+  // calculate rotation
+  vector<Rot2> rotations;
+  Point2Pairs abPointPairs;
+  rotations.reserve(n);
+  abPointPairs.reserve(n);
+  // Below denotes the pose of the i'th object/camera/etc
+  // in frame "a" or frame "b".
+  Pose2 aTi, bTi;
+  for (const Pose2Pair& abPair : abPosePairs) {
+    std::tie(aTi, bTi) = abPair;
+    const Rot2 aRb = aTi.rotation().compose(bTi.rotation().inverse());
+    rotations.emplace_back(aRb);
+    abPointPairs.emplace_back(aTi.translation(), bTi.translation());
+  }
+  const Rot2 aRb_estimate = FindKarcherMean<Rot2>(rotations);
+
+  return internal::AlignGivenR(abPointPairs, aRb_estimate);
+}
+
+Vector4 Similarity2::Logmap(const Similarity2& S,  //
+                            OptionalJacobian<4, 4> Hm) {
+  const Vector2 u = S.t_;
+  const Vector1 w = Rot2::Logmap(S.R_);
+  const double s = log(S.s_);
+  Vector4 result;
+  result << u, w, s;
+  if (Hm) {
+    throw std::runtime_error("Similarity2::Logmap: derivative not implemented");
+  }
+  return result;
+}
+
+Similarity2 Similarity2::Expmap(const Vector4& v,  //
+                                OptionalJacobian<4, 4> Hm) {
+  const Vector2 t = v.head<2>();
+  const Rot2 R = Rot2::Expmap(v.segment<1>(2));
+  const double s = v[3];
+  if (Hm) {
+    throw std::runtime_error("Similarity2::Expmap: derivative not implemented");
+  }
+  return Similarity2(R, t, s);
+}
+
+Matrix4 Similarity2::AdjointMap() const {
+  throw std::runtime_error("Similarity2::AdjointMap not implemented");
+}
+
+std::ostream& operator<<(std::ostream& os, const Similarity2& p) {
+  os << "[" << p.rotation().theta() << " " << p.translation().transpose() << " "
+     << p.scale() << "]\';";
+  return os;
+}
+
+Matrix3 Similarity2::matrix() const {
+  Matrix3 T;
+  T.topRows<2>() << R_.matrix(), t_;
+  T.bottomRows<1>() << 0, 0, 1.0 / s_;
+  return T;
+}
+
+}  // namespace gtsam

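A hedged sketch of the new Similarity2::Align on point pairs (illustration only; `sim2AlignSketch` is scaffolding). As with Pose2::Align, pairs follow the (a_point, b_point) convention with a = S * b.

#include <gtsam/geometry/Similarity2.h>

void sim2AlignSketch() {
  const gtsam::Similarity2 S(gtsam::Rot2::fromAngle(0.3),
                             gtsam::Point2(1, 2), 2.0);
  gtsam::Point2Pairs ab_pairs;
  for (const gtsam::Point2& b : {gtsam::Point2(0, 0), gtsam::Point2(1, 0),
                                 gtsam::Point2(0, 1)}) {
    ab_pairs.emplace_back(S.transformFrom(b), b);  // (a_point, b_point)
  }
  // With noiseless correspondences the estimate should recover S.
  gtsam::Similarity2 estimate = gtsam::Similarity2::Align(ab_pairs);
}
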
@@ -0,0 +1,200 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file   Similarity2.h
+ * @brief  Implementation of Similarity2 transform
+ * @author John Lambert, Varun Agrawal
+ */
+
+#pragma once
+
+#include <gtsam/base/Lie.h>
+#include <gtsam/base/Manifold.h>
+#include <gtsam/dllexport.h>
+#include <gtsam/geometry/Point2.h>
+#include <gtsam/geometry/Pose2.h>
+#include <gtsam/geometry/Rot2.h>
+
+namespace gtsam {
+
+// Forward declarations
+class Pose2;
+
+/**
+ * 2D similarity transform
+ */
+class GTSAM_EXPORT Similarity2 : public LieGroup<Similarity2, 4> {
+  /// @name Pose Concept
+  /// @{
+  typedef Rot2 Rotation;
+  typedef Point2 Translation;
+  /// @}
+
+ private:
+  Rot2 R_;
+  Point2 t_;
+  double s_;
+
+ public:
+  /// @name Constructors
+  /// @{
+
+  /// Default constructor
+  Similarity2();
+
+  /// Construct pure scaling
+  Similarity2(double s);
+
+  /// Construct from GTSAM types
+  Similarity2(const Rot2& R, const Point2& t, double s);
+
+  /// Construct from Eigen types
+  Similarity2(const Matrix2& R, const Vector2& t, double s);
+
+  /// Construct from matrix [R t; 0 s^-1]
+  Similarity2(const Matrix3& T);
+
+  /// @}
+  /// @name Testable
+  /// @{
+
+  /// Compare with tolerance
+  bool equals(const Similarity2& sim, double tol) const;
+
+  /// Exact equality
+  bool operator==(const Similarity2& other) const;
+
+  /// Print with optional string
+  void print(const std::string& s) const;
+
+  friend std::ostream& operator<<(std::ostream& os, const Similarity2& p);
+
+  /// @}
+  /// @name Group
+  /// @{
+
+  /// Return an identity transform
+  static Similarity2 identity();
+
+  /// Composition
+  Similarity2 operator*(const Similarity2& S) const;
+
+  /// Return the inverse
+  Similarity2 inverse() const;
+
+  /// @}
+  /// @name Group action on Point2
+  /// @{
+
+  /// Action on a point p is s*(R*p+t)
+  Point2 transformFrom(const Point2& p) const;
+
+  /**
+   * Action on a pose T.
+   * |Rs  ts|   |R t|   |Rs*R  Rs*t+ts|
+   * |0  1/s| * |0 1| = |  0     1/s  |, the result is still a Sim2 object.
+   * To retrieve a Pose2, we normalize the scale value to 1.
+   * |Rs*R  Rs*t+ts|   |Rs*R  s(Rs*t+ts)|
+   * |  0     1/s  | = |  0       1     |
+   *
+   * This group action satisfies the compatibility condition.
+   * For more details, refer to: https://en.wikipedia.org/wiki/Group_action
+   */
+  Pose2 transformFrom(const Pose2& T) const;
+
+  /* syntactic sugar for transformFrom */
+  Point2 operator*(const Point2& p) const;
+
+  /**
+   * Create Similarity2 by aligning at least two point pairs
+   */
+  static Similarity2 Align(const Point2Pairs& abPointPairs);
+
+  /**
+   * Create the Similarity2 object that aligns at least two pose pairs.
+   * Each pair is of the form (aTi, bTi).
+   * Given a list of pairs in frame a, and a list of pairs in frame b, Align()
+   * will compute the best-fit Similarity2 aSb transformation to align them.
+   * First, the rotation aRb will be computed as the average (Karcher mean) of
+   * many estimates aRb (from each pair). Afterwards, the scale factor will be
+   * computed using the algorithm described here:
+   * http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
+   */
+  static Similarity2 Align(const std::vector<Pose2Pair>& abPosePairs);
+
+  /// @}
+  /// @name Lie Group
+  /// @{
+
+  /**
+   * Log map at the identity
+   * \f$ [t_x, t_y, \delta, \lambda] \f$
+   */
+  static Vector4 Logmap(const Similarity2& S,  //
+                        OptionalJacobian<4, 4> Hm = boost::none);
+
+  /// Exponential map at the identity
+  static Similarity2 Expmap(const Vector4& v,  //
+                            OptionalJacobian<4, 4> Hm = boost::none);
+
+  /// Chart at the origin
+  struct ChartAtOrigin {
+    static Similarity2 Retract(const Vector4& v,
+                               ChartJacobian H = boost::none) {
+      return Similarity2::Expmap(v, H);
+    }
+    static Vector4 Local(const Similarity2& other,
+                         ChartJacobian H = boost::none) {
+      return Similarity2::Logmap(other, H);
+    }
+  };
+
+  /// Project from one tangent space to another
+  Matrix4 AdjointMap() const;
+
+  using LieGroup<Similarity2, 4>::inverse;
+
+  /// @}
+  /// @name Standard interface
+  /// @{
+
+  /// Calculate 3*3 matrix group equivalent
+  Matrix3 matrix() const;
+
+  /// Return a GTSAM rotation
+  Rot2 rotation() const { return R_; }
+
+  /// Return a GTSAM translation
+  Point2 translation() const { return t_; }
+
+  /// Return the scale
+  double scale() const { return s_; }
+
+  /// Dimensionality of tangent space = 4 DOF - used to autodetect sizes
+  inline static size_t Dim() { return 4; }
+
+  /// Dimensionality of tangent space = 4 DOF
+  inline size_t dim() const { return 4; }
+
+  /// @}
+};
+
+template <>
+struct traits<Similarity2> : public internal::LieGroup<Similarity2> {};
+
+template <>
+struct traits<const Similarity2> : public internal::LieGroup<Similarity2> {};
+
+}  // namespace gtsam

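A worked example of the Point2 group action documented above, s*(R*p + t) (illustration only; `sim2ActionSketch` is scaffolding):

#include <gtsam/geometry/Similarity2.h>
#include <cmath>

void sim2ActionSketch() {
  const gtsam::Similarity2 S(gtsam::Rot2::fromAngle(M_PI / 2),
                             gtsam::Point2(1, 0), 2.0);
  const gtsam::Point2 p(1, 0);
  // R*p = (0, 1); R*p + t = (1, 1); s*(R*p + t) = (2, 2).
  const gtsam::Point2 q = S * p;  // same as S.transformFrom(p)
}
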
@@ -26,7 +26,7 @@ namespace gtsam {
 
 using std::vector;
 
-namespace {
+namespace internal {
 /// Subtract centroids from point pairs.
 static Point3Pairs subtractCentroids(const Point3Pairs &abPointPairs,
                                      const Point3Pair &centroids) {

@@ -81,10 +81,10 @@ static Similarity3 align(const Point3Pairs &d_abPointPairs, const Rot3 &aRb,
 static Similarity3 alignGivenR(const Point3Pairs &abPointPairs,
                                const Rot3 &aRb) {
   auto centroids = means(abPointPairs);
-  auto d_abPointPairs = subtractCentroids(abPointPairs, centroids);
+  auto d_abPointPairs = internal::subtractCentroids(abPointPairs, centroids);
   return align(d_abPointPairs, aRb, centroids);
 }
-} // namespace
+} // namespace internal
 
 Similarity3::Similarity3() :
     t_(0,0,0), s_(1) {

@@ -165,11 +165,11 @@ Similarity3 Similarity3::Align(const Point3Pairs &abPointPairs) {
   if (abPointPairs.size() < 3)
     throw std::runtime_error("input should have at least 3 pairs of points");
   auto centroids = means(abPointPairs);
-  auto d_abPointPairs = subtractCentroids(abPointPairs, centroids);
-  Matrix3 H = calculateH(d_abPointPairs);
+  auto d_abPointPairs = internal::subtractCentroids(abPointPairs, centroids);
+  Matrix3 H = internal::calculateH(d_abPointPairs);
   // ClosestTo finds rotation matrix closest to H in Frobenius sense
   Rot3 aRb = Rot3::ClosestTo(H);
-  return align(d_abPointPairs, aRb, centroids);
+  return internal::align(d_abPointPairs, aRb, centroids);
 }
 
 Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {

@@ -192,7 +192,7 @@ Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {
   }
   const Rot3 aRb_estimate = FindKarcherMean<Rot3>(rotations);
 
-  return alignGivenR(abPointPairs, aRb_estimate);
+  return internal::alignGivenR(abPointPairs, aRb_estimate);
 }
 
 Matrix4 Similarity3::wedge(const Vector7 &xi) {

@ -283,15 +283,11 @@ std::ostream &operator<<(std::ostream &os, const Similarity3& p) {
|
|||
return os;
|
||||
}
|
||||
|
||||
const Matrix4 Similarity3::matrix() const {
|
||||
Matrix4 Similarity3::matrix() const {
|
||||
Matrix4 T;
|
||||
T.topRows<3>() << R_.matrix(), t_;
|
||||
T.bottomRows<1>() << 0, 0, 0, 1.0 / s_;
|
||||
return T;
|
||||
}
|
||||
|
||||
Similarity3::operator Pose3() const {
|
||||
return Pose3(R_, s_ * t_);
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
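A quick sanity check of the [R t; 0 1/s] convention used by matrix() above: dehomogenizing the 4x4 action recovers the documented group action s*(R*p+t). A minimal sketch (the numeric values are illustrative only):

#include <gtsam/geometry/Similarity3.h>
#include <iostream>

using namespace gtsam;

int main() {
  Similarity3 aSb(Rot3::Ypr(0.1, 0.2, 0.3), Point3(1, 2, 3), 2.0);
  Point3 p(4, 5, 6);

  // Action via the homogeneous form [R t; 0 1/s]:
  Vector4 ph; ph << p.x(), p.y(), p.z(), 1.0;
  Vector4 qh = aSb.matrix() * ph;          // = [R*p + t; 1/s]
  Point3 q1 = qh.head<3>() / qh(3);        // dehomogenize: s*(R*p + t)

  // Action via the group operator, documented as s*(R*p+t):
  Point3 q2 = aSb.transformFrom(p);

  std::cout << (q1 - q2).norm() << std::endl;  // ~0 up to rounding
}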
@ -18,13 +18,12 @@

#pragma once

#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/Lie.h>
#include <gtsam/base/Manifold.h>
#include <gtsam/dllexport.h>

#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Rot3.h>

namespace gtsam {

@ -34,8 +33,7 @@ class Pose3;
/**
 * 3D similarity transform
 */
class Similarity3: public LieGroup<Similarity3, 7> {

class GTSAM_EXPORT Similarity3 : public LieGroup<Similarity3, 7> {
  /// @name Pose Concept
  /// @{
  typedef Rot3 Rotation;

@ -48,59 +46,58 @@ private:
  double s_;

 public:
  /// @name Constructors
  /// @{

  /// Default constructor
  GTSAM_EXPORT Similarity3();
  Similarity3();

  /// Construct pure scaling
  GTSAM_EXPORT Similarity3(double s);
  Similarity3(double s);

  /// Construct from GTSAM types
  GTSAM_EXPORT Similarity3(const Rot3& R, const Point3& t, double s);
  Similarity3(const Rot3& R, const Point3& t, double s);

  /// Construct from Eigen types
  GTSAM_EXPORT Similarity3(const Matrix3& R, const Vector3& t, double s);
  Similarity3(const Matrix3& R, const Vector3& t, double s);

  /// Construct from matrix [R t; 0 s^-1]
  GTSAM_EXPORT Similarity3(const Matrix4& T);
  Similarity3(const Matrix4& T);

  /// @}
  /// @name Testable
  /// @{

  /// Compare with tolerance
  GTSAM_EXPORT bool equals(const Similarity3& sim, double tol) const;
  bool equals(const Similarity3& sim, double tol) const;

  /// Exact equality
  GTSAM_EXPORT bool operator==(const Similarity3& other) const;
  bool operator==(const Similarity3& other) const;

  /// Print with optional string
  GTSAM_EXPORT void print(const std::string& s) const;
  void print(const std::string& s) const;

  GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Similarity3& p);
  friend std::ostream& operator<<(std::ostream& os, const Similarity3& p);

  /// @}
  /// @name Group
  /// @{

  /// Return an identity transform
  GTSAM_EXPORT static Similarity3 identity();
  static Similarity3 identity();

  /// Composition
  GTSAM_EXPORT Similarity3 operator*(const Similarity3& S) const;
  Similarity3 operator*(const Similarity3& S) const;

  /// Return the inverse
  GTSAM_EXPORT Similarity3 inverse() const;
  Similarity3 inverse() const;

  /// @}
  /// @name Group action on Point3
  /// @{

  /// Action on a point p is s*(R*p+t)
  GTSAM_EXPORT Point3 transformFrom(const Point3& p, //
  Point3 transformFrom(const Point3& p, //
      OptionalJacobian<3, 7> H1 = boost::none, //
      OptionalJacobian<3, 3> H2 = boost::none) const;

@ -115,15 +112,15 @@ public:
   * This group action satisfies the compatibility condition.
   * For more details, refer to: https://en.wikipedia.org/wiki/Group_action
   */
  GTSAM_EXPORT Pose3 transformFrom(const Pose3& T) const;
  Pose3 transformFrom(const Pose3& T) const;

  /** syntactic sugar for transformFrom */
  GTSAM_EXPORT Point3 operator*(const Point3& p) const;
  Point3 operator*(const Point3& p) const;

  /**
   * Create Similarity3 by aligning at least three point pairs
   */
  GTSAM_EXPORT static Similarity3 Align(const std::vector<Point3Pair>& abPointPairs);
  static Similarity3 Align(const std::vector<Point3Pair>& abPointPairs);

  /**
   * Create the Similarity3 object that aligns at least two pose pairs.

@ -131,11 +128,11 @@ public:
   * Given a list of pairs in frame a, and a list of pairs in frame b, Align()
   * will compute the best-fit Similarity3 aSb transformation to align them.
   * First, the rotation aRb will be computed as the average (Karcher mean) of
   * many estimates aRb (from each pair). Afterwards, the scale factor will be computed
   * using the algorithm described here:
   * many estimates aRb (from each pair). Afterwards, the scale factor will be
   * computed using the algorithm described here:
   * http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
   */
  GTSAM_EXPORT static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
  static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);

  /// @}
  /// @name Lie Group

@ -144,20 +141,22 @@ public:
  /** Log map at the identity
   * \f$ [R_x,R_y,R_z, t_x, t_y, t_z, \lambda] \f$
   */
  GTSAM_EXPORT static Vector7 Logmap(const Similarity3& s, //
  static Vector7 Logmap(const Similarity3& s, //
      OptionalJacobian<7, 7> Hm = boost::none);

  /** Exponential map at the identity
   */
  GTSAM_EXPORT static Similarity3 Expmap(const Vector7& v, //
  static Similarity3 Expmap(const Vector7& v, //
      OptionalJacobian<7, 7> Hm = boost::none);

  /// Chart at the origin
  struct ChartAtOrigin {
    static Similarity3 Retract(const Vector7& v, ChartJacobian H = boost::none) {
    static Similarity3 Retract(const Vector7& v,
                               ChartJacobian H = boost::none) {
      return Similarity3::Expmap(v, H);
    }
    static Vector7 Local(const Similarity3& other, ChartJacobian H = boost::none) {
    static Vector7 Local(const Similarity3& other,
                         ChartJacobian H = boost::none) {
      return Similarity3::Logmap(other, H);
    }
  };

@ -170,46 +169,32 @@ public:
   * @return 4*4 element of Lie algebra that can be exponentiated
   * TODO(frank): rename to Hat, make part of traits
   */
  GTSAM_EXPORT static Matrix4 wedge(const Vector7& xi);
  static Matrix4 wedge(const Vector7& xi);

  /// Project from one tangent space to another
  GTSAM_EXPORT Matrix7 AdjointMap() const;
  Matrix7 AdjointMap() const;

  /// @}
  /// @name Standard interface
  /// @{

  /// Calculate 4*4 matrix group equivalent
  GTSAM_EXPORT const Matrix4 matrix() const;
  Matrix4 matrix() const;

  /// Return a GTSAM rotation
  const Rot3& rotation() const {
    return R_;
  }
  Rot3 rotation() const { return R_; }

  /// Return a GTSAM translation
  const Point3& translation() const {
    return t_;
  }
  Point3 translation() const { return t_; }

  /// Return the scale
  double scale() const {
    return s_;
  }

  /// Convert to a rigid body pose (R, s*t)
  /// TODO(frank): why is this here? Red flag! Definitely don't have it as a cast.
  GTSAM_EXPORT operator Pose3() const;
  double scale() const { return s_; }

  /// Dimensionality of tangent space = 7 DOF - used to autodetect sizes
  inline static size_t Dim() {
    return 7;
  }
  inline static size_t Dim() { return 7; }

  /// Dimensionality of tangent space = 7 DOF
  inline size_t dim() const {
    return 7;
  }
  inline size_t dim() const { return 7; }

  /// @}
  /// @name Helper functions

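A usage sketch of the Align variants declared above (hypothetical values; per the .cpp change, Align throws if fewer than three point pairs are supplied):

#include <gtsam/geometry/Similarity3.h>
#include <vector>

using namespace gtsam;

int main() {
  // Ground truth used to synthesize correspondences.
  Similarity3 aSb(Rot3::Rz(0.5), Point3(1, 0, 0), 2.0);

  // Point3Pair is (a, b): the same point expressed in frames a and b.
  std::vector<Point3Pair> abPointPairs;
  for (const Point3& b : {Point3(0, 0, 0), Point3(1, 0, 0), Point3(0, 1, 0)}) {
    abPointPairs.emplace_back(aSb.transformFrom(b), b);
  }

  // Recovers aSb up to numerics; needs at least 3 pairs.
  Similarity3 estimate = Similarity3::Align(abPointPairs);
  estimate.print("estimated aSb");
}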
@ -372,6 +372,9 @@ class Pose2 {
  Pose2(const gtsam::Rot2& r, const gtsam::Point2& t);
  Pose2(Vector v);

  static boost::optional<gtsam::Pose2> Align(const gtsam::Point2Pairs& abPointPairs);
  static boost::optional<gtsam::Pose2> Align(const gtsam::Matrix& a, const gtsam::Matrix& b);

  // Testable
  void print(string s = "") const;
  bool equals(const gtsam::Pose2& pose, double tol) const;

@ -424,8 +427,6 @@ class Pose2 {
  void serialize() const;
};

boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);

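The free align() function above is replaced by the static Pose2::Align named constructor. A minimal native sketch of the new entry point (pairs are (a, b) correspondences; the returned optional is empty when alignment fails, which is an assumption worth checking against the implementation):

#include <gtsam/geometry/Pose2.h>
#include <cmath>

using namespace gtsam;

int main() {
  Pose2 aTb(Rot2::fromAngle(M_PI / 4), Point2(1, 2));

  // Each pair holds the same point in frame a and in frame b.
  Point2Pairs ab_pairs;
  for (const Point2& b : {Point2(0, 0), Point2(10, 0), Point2(10, 10)}) {
    ab_pairs.emplace_back(aTb.transformFrom(b), b);
  }

  boost::optional<Pose2> estimate = Pose2::Align(ab_pairs);
  if (estimate) estimate->print("recovered aTb");
}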
#include <gtsam/geometry/Pose3.h>
class Pose3 {
  // Standard Constructors

@ -546,6 +547,12 @@ class EssentialMatrix {
  // Standard Constructors
  EssentialMatrix(const gtsam::Rot3& aRb, const gtsam::Unit3& aTb);

  // Constructors from Pose3
  gtsam::EssentialMatrix FromPose3(const gtsam::Pose3& _1P2_);

  gtsam::EssentialMatrix FromPose3(const gtsam::Pose3& _1P2_,
                                   Eigen::Ref<Eigen::MatrixXd> H);

  // Testable
  void print(string s = "") const;
  bool equals(const gtsam::EssentialMatrix& pose, double tol) const;

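A hedged sketch of the corresponding native call (in C++ the Jacobian argument is an OptionalJacobian, assuming the usual 5-dof essential matrix over a 6-dof pose):

#include <gtsam/geometry/EssentialMatrix.h>
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

int main() {
  Pose3 _1P2_(Rot3::Ypr(0.1, 0.0, 0.0), Point3(1, 0, 0));

  // 5x6 Jacobian of the essential matrix w.r.t. the pose.
  Matrix56 H;
  EssentialMatrix E = EssentialMatrix::FromPose3(_1P2_, H);
  E.print("E");
}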
@ -583,7 +590,13 @@ class Cal3_S2 {

  // Action on Point2
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;

  // Standard Interface
  double fx() const;

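The Eigen::Ref overloads above exist for the wrapper; in C++ the same Jacobians come back through OptionalJacobian arguments. A sketch, assuming the usual 2x5 / 2x2 sizes for the 5-parameter Cal3_S2:

#include <gtsam/geometry/Cal3_S2.h>

using namespace gtsam;

int main() {
  Cal3_S2 K(500.0, 500.0, 0.0, 320.0, 240.0);  // fx, fy, s, u0, v0
  Point2 p(330.0, 250.0);                      // pixel coordinates

  Matrix25 Dcal;  // d(calibrate)/d(calibration parameters)
  Matrix22 Dp;    // d(calibrate)/d(point)
  Point2 intrinsic = K.calibrate(p, Dcal, Dp);

  // Round trip back to pixels; equals p up to rounding.
  Point2 pixels = K.uncalibrate(intrinsic);
}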
@ -622,7 +635,13 @@ virtual class Cal3DS2_Base {

  // Action on Point2
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;

  // enabling serialization functionality
  void serialize() const;

@ -679,7 +698,13 @@ virtual class Cal3Unified : gtsam::Cal3DS2_Base {
  // Note: the signatures of these functions differ from the functions
  // with the same name in the base class.
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;

  // enabling serialization functionality
  void serialize() const;

@ -705,7 +730,13 @@ class Cal3Fisheye {

  // Action on Point2
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;

  // Standard Interface
  double fx() const;

@ -768,7 +799,13 @@ class Cal3Bundler {

  // Action on Point2
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;

  // Standard Interface
  double fx() const;

@ -806,12 +843,25 @@ class CalibratedCamera {

  // Action on Point3
  gtsam::Point2 project(const gtsam::Point3& point) const;
  gtsam::Point2 project(const gtsam::Point3& point,
                        Eigen::Ref<Eigen::MatrixXd> Dcamera,
                        Eigen::Ref<Eigen::MatrixXd> Dpoint);
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth);

  static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);

  // Standard Interface
  gtsam::Pose3 pose() const;
  double range(const gtsam::Point3& point) const;
  double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpoint);
  double range(const gtsam::Pose3& pose) const;
  double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpose);
  double range(const gtsam::CalibratedCamera& camera) const;

  // enabling serialization functionality

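The Jacobian overloads above mirror the native OptionalJacobian arguments. A common sanity check in C++ (a sketch, assuming 2x6 / 2x3 Jacobian shapes) is to compare them against gtsam's numerical differentiation helpers:

#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/CalibratedCamera.h>

using namespace gtsam;

int main() {
  CalibratedCamera camera(Pose3(Rot3(), Point3(0, 0, -5)));
  Point3 landmark(0.5, -0.5, 0);

  Matrix26 Dcamera;
  Matrix23 Dpoint;
  camera.project(landmark, Dcamera, Dpoint);

  // Numerical check of the analytic Jacobians.
  auto f = [](const CalibratedCamera& c, const Point3& p) {
    return c.project(p);
  };
  Matrix numD1 = numericalDerivative21<Point2, CalibratedCamera, Point3>(
      f, camera, landmark);
  Matrix numD2 = numericalDerivative22<Point2, CalibratedCamera, Point3>(
      f, camera, landmark);
  // In a unit test: EXPECT(assert_equal(numD1, Matrix(Dcamera))); etc.
}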
@ -823,6 +873,7 @@ template <CALIBRATION>
class PinholeCamera {
  // Standard Constructors and Named Constructors
  PinholeCamera();
  PinholeCamera(const gtsam::PinholeCamera<CALIBRATION> other);
  PinholeCamera(const gtsam::Pose3& pose);
  PinholeCamera(const gtsam::Pose3& pose, const CALIBRATION& K);
  static This Level(const CALIBRATION& K, const gtsam::Pose2& pose,

@ -849,14 +900,123 @@ class PinholeCamera {
  static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
  pair<gtsam::Point2, bool> projectSafe(const gtsam::Point3& pw) const;
  gtsam::Point2 project(const gtsam::Point3& point);
  gtsam::Point2 project(const gtsam::Point3& point,
                        Eigen::Ref<Eigen::MatrixXd> Dpose,
                        Eigen::Ref<Eigen::MatrixXd> Dpoint,
                        Eigen::Ref<Eigen::MatrixXd> Dcal);
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dcal);

  gtsam::Point2 reprojectionError(const gtsam::Point3& pw, const gtsam::Point2& measured,
                                  Eigen::Ref<Eigen::MatrixXd> Dpose,
                                  Eigen::Ref<Eigen::MatrixXd> Dpoint,
                                  Eigen::Ref<Eigen::MatrixXd> Dcal);

  double range(const gtsam::Point3& point);
  double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpoint);
  double range(const gtsam::Pose3& pose);
  double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpose);

  // enabling serialization functionality
  void serialize() const;
};

// Forward declaration of PinholeCameraCalX is defined here.
#include <gtsam/geometry/SimpleCamera.h>
// Some typedefs for common camera types
// PinholeCameraCal3_S2 is the same as SimpleCamera above
typedef gtsam::PinholeCamera<gtsam::Cal3_S2> PinholeCameraCal3_S2;
typedef gtsam::PinholeCamera<gtsam::Cal3DS2> PinholeCameraCal3DS2;
typedef gtsam::PinholeCamera<gtsam::Cal3Unified> PinholeCameraCal3Unified;
typedef gtsam::PinholeCamera<gtsam::Cal3Bundler> PinholeCameraCal3Bundler;
typedef gtsam::PinholeCamera<gtsam::Cal3Fisheye> PinholeCameraCal3Fisheye;

#include <gtsam/geometry/PinholePose.h>
template <CALIBRATION>
class PinholePose {
  // Standard Constructors and Named Constructors
  PinholePose();
  PinholePose(const gtsam::PinholePose<CALIBRATION> other);
  PinholePose(const gtsam::Pose3& pose);
  PinholePose(const gtsam::Pose3& pose, const CALIBRATION* K);
  static This Level(const gtsam::Pose2& pose, double height);
  static This Lookat(const gtsam::Point3& eye, const gtsam::Point3& target,
                     const gtsam::Point3& upVector, const CALIBRATION* K);

  // Testable
  void print(string s = "PinholePose") const;
  bool equals(const This& camera, double tol) const;

  // Standard Interface
  gtsam::Pose3 pose() const;
  CALIBRATION calibration() const;

  // Manifold
  This retract(Vector d) const;
  Vector localCoordinates(const This& T2) const;
  size_t dim() const;
  static size_t Dim();

  // Transformations and measurement functions
  static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
  pair<gtsam::Point2, bool> projectSafe(const gtsam::Point3& pw) const;
  gtsam::Point2 project(const gtsam::Point3& point);
  gtsam::Point2 project(const gtsam::Point3& point,
                        Eigen::Ref<Eigen::MatrixXd> Dpose,
                        Eigen::Ref<Eigen::MatrixXd> Dpoint,
                        Eigen::Ref<Eigen::MatrixXd> Dcal);
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
  gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth,
                            Eigen::Ref<Eigen::MatrixXd> Dresult_dcal);
  double range(const gtsam::Point3& point);
  double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpoint);
  double range(const gtsam::Pose3& pose);
  double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Dcamera,
               Eigen::Ref<Eigen::MatrixXd> Dpose);

  // enabling serialization functionality
  void serialize() const;
};

typedef gtsam::PinholePose<gtsam::Cal3_S2> PinholePoseCal3_S2;
typedef gtsam::PinholePose<gtsam::Cal3DS2> PinholePoseCal3DS2;
typedef gtsam::PinholePose<gtsam::Cal3Unified> PinholePoseCal3Unified;
typedef gtsam::PinholePose<gtsam::Cal3Bundler> PinholePoseCal3Bundler;
typedef gtsam::PinholePose<gtsam::Cal3Fisheye> PinholePoseCal3Fisheye;

#include <gtsam/geometry/Similarity2.h>
class Similarity2 {
  // Standard Constructors
  Similarity2();
  Similarity2(double s);
  Similarity2(const gtsam::Rot2& R, const gtsam::Point2& t, double s);
  Similarity2(const Matrix& R, const Vector& t, double s);
  Similarity2(const Matrix& T);

  gtsam::Point2 transformFrom(const gtsam::Point2& p) const;
  gtsam::Pose2 transformFrom(const gtsam::Pose2& T);

  static gtsam::Similarity2 Align(const gtsam::Point2Pairs& abPointPairs);
  static gtsam::Similarity2 Align(const gtsam::Pose2Pairs& abPosePairs);

  // Standard Interface
  bool equals(const gtsam::Similarity2& sim, double tol) const;
  Matrix matrix() const;
  gtsam::Rot2& rotation();
  gtsam::Point2& translation();
  double scale() const;
};

#include <gtsam/geometry/Similarity3.h>
class Similarity3 {
  // Standard Constructors

@ -873,22 +1033,13 @@ class Similarity3 {
  static gtsam::Similarity3 Align(const gtsam::Pose3Pairs& abPosePairs);

  // Standard Interface
  const Matrix matrix() const;
  const gtsam::Rot3& rotation();
  const gtsam::Point3& translation();
  bool equals(const gtsam::Similarity3& sim, double tol) const;
  Matrix matrix() const;
  gtsam::Rot3& rotation();
  gtsam::Point3& translation();
  double scale() const;
};

// Forward declaration of PinholeCameraCalX is defined here.
#include <gtsam/geometry/SimpleCamera.h>
// Some typedefs for common camera types
// PinholeCameraCal3_S2 is the same as SimpleCamera above
typedef gtsam::PinholeCamera<gtsam::Cal3_S2> PinholeCameraCal3_S2;
typedef gtsam::PinholeCamera<gtsam::Cal3DS2> PinholeCameraCal3DS2;
typedef gtsam::PinholeCamera<gtsam::Cal3Unified> PinholeCameraCal3Unified;
typedef gtsam::PinholeCamera<gtsam::Cal3Bundler> PinholeCameraCal3Bundler;
typedef gtsam::PinholeCamera<gtsam::Cal3Fisheye> PinholeCameraCal3Fisheye;

template <T>
class CameraSet {
  CameraSet();

@ -920,66 +1071,166 @@ class StereoCamera {
  static size_t Dim();

  // Transformations and measurement functions
  gtsam::StereoPoint2 project(const gtsam::Point3& point);
  gtsam::StereoPoint2 project(const gtsam::Point3& point) const;
  gtsam::Point3 backproject(const gtsam::StereoPoint2& p) const;

  // project with Jacobian
  gtsam::StereoPoint2 project2(const gtsam::Point3& point,
                               Eigen::Ref<Eigen::MatrixXd> H1,
                               Eigen::Ref<Eigen::MatrixXd> H2) const;

  gtsam::Point3 backproject2(const gtsam::StereoPoint2& p,
                             Eigen::Ref<Eigen::MatrixXd> H1,
                             Eigen::Ref<Eigen::MatrixXd> H2) const;

  // enabling serialization functionality
  void serialize() const;
};

#include <gtsam/geometry/triangulation.h>
class TriangulationResult {
  enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT };
  Status status;
  TriangulationResult(const gtsam::Point3& p);
  const gtsam::Point3& get() const;
  static TriangulationResult Degenerate();
  static TriangulationResult Outlier();
  static TriangulationResult FarPoint();
  static TriangulationResult BehindCamera();
  bool valid() const;
  bool degenerate() const;
  bool outlier() const;
  bool farPoint() const;
  bool behindCamera() const;
};

class TriangulationParameters {
  double rankTolerance;
  bool enableEPI;
  double landmarkDistanceThreshold;
  double dynamicOutlierRejectionThreshold;
  SharedNoiseModel noiseModel;
  TriangulationParameters(const double rankTolerance = 1.0,
                          const bool enableEPI = false,
                          double landmarkDistanceThreshold = -1,
                          double dynamicOutlierRejectionThreshold = -1,
                          const gtsam::SharedNoiseModel& noiseModel = nullptr);
};

// Templates appear not yet supported for free functions - issue raised at
// borglab/wrap#14 to add support

// Cal3_S2 versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3_S2* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3DS2* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3Bundler* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3_S2* sharedCal,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
    const gtsam::CameraSetCal3_S2& cameras,
    const gtsam::Point2Vector& measurements,
    const gtsam::TriangulationParameters& params);

// Cal3DS2 versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3DS2* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3DS2& cameras,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3DS2* sharedCal,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3DS2& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
    const gtsam::CameraSetCal3DS2& cameras,
    const gtsam::Point2Vector& measurements,
    const gtsam::TriangulationParameters& params);

// Cal3Bundler versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3Bundler* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3Bundler* sharedCal,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
    const gtsam::CameraSetCal3Bundler& cameras,
    const gtsam::Point2Vector& measurements,
    const gtsam::TriangulationParameters& params);

// Cal3Fisheye versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3Fisheye* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3Fisheye* sharedCal,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Fisheye& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
    const gtsam::CameraSetCal3Fisheye& cameras,
    const gtsam::Point2Vector& measurements,
    const gtsam::TriangulationParameters& params);

// Cal3Unified versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
                                gtsam::Cal3Unified* sharedCal,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras,
                                const gtsam::Point2Vector& measurements,
                                double rank_tol, bool optimize,
                                const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3_S2* sharedCal,
                                   gtsam::Cal3Unified* sharedCal,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3DS2* sharedCal,
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Unified& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
                                   gtsam::Cal3Bundler* sharedCal,
gtsam::TriangulationResult triangulateSafe(
    const gtsam::CameraSetCal3Unified& cameras,
    const gtsam::Point2Vector& measurements,
    const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras,
                                   const gtsam::Point2Vector& measurements,
                                   const gtsam::Point3& initialEstimate);
    const gtsam::TriangulationParameters& params);


#include <gtsam/geometry/BearingRange.h>
template <POSE, POINT, BEARING, RANGE>

@ -718,14 +718,10 @@ TEST( Pose2, range_pose )

TEST(Pose2, align_1) {
  Pose2 expected(Rot2::fromAngle(0), Point2(10, 10));

  vector<Point2Pair> correspondences;
  Point2Pair pq1(make_pair(Point2(0,0), Point2(10,10)));
  Point2Pair pq2(make_pair(Point2(20,10), Point2(30,20)));
  correspondences += pq1, pq2;

  boost::optional<Pose2> actual = align(correspondences);
  EXPECT(assert_equal(expected, *actual));
  Point2Pairs ab_pairs {{Point2(10, 10), Point2(0, 0)},
                        {Point2(30, 20), Point2(20, 10)}};
  boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
  EXPECT(assert_equal(expected, *aTb));
}

TEST(Pose2, align_2) {

@ -733,37 +729,34 @@ TEST(Pose2, align_2) {
  Rot2 R = Rot2::fromAngle(M_PI/2.0);
  Pose2 expected(R, t);

  vector<Point2Pair> correspondences;
  Point2 p1(0,0), p2(10,0);
  Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2);
  EXPECT(assert_equal(Point2(20,10),q1));
  EXPECT(assert_equal(Point2(20,20),q2));
  Point2Pair pq1(make_pair(p1, q1));
  Point2Pair pq2(make_pair(p2, q2));
  correspondences += pq1, pq2;
  Point2 b1(0, 0), b2(10, 0);
  Point2Pairs ab_pairs {{expected.transformFrom(b1), b1},
                        {expected.transformFrom(b2), b2}};

  boost::optional<Pose2> actual = align(correspondences);
  EXPECT(assert_equal(expected, *actual));
  boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
  EXPECT(assert_equal(expected, *aTb));
}

namespace align_3 {
  Point2 t(10, 10);
  Pose2 expected(Rot2::fromAngle(2*M_PI/3), t);
  Point2 p1(0,0), p2(10,0), p3(10,10);
  Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2), q3 = expected.transformFrom(p3);
  Point2 b1(0, 0), b2(10, 0), b3(10, 10);
  Point2 a1 = expected.transformFrom(b1),
         a2 = expected.transformFrom(b2),
         a3 = expected.transformFrom(b3);
}

TEST(Pose2, align_3) {
  using namespace align_3;

  vector<Point2Pair> correspondences;
  Point2Pair pq1(make_pair(p1, q1));
  Point2Pair pq2(make_pair(p2, q2));
  Point2Pair pq3(make_pair(p3, q3));
  correspondences += pq1, pq2, pq3;
  Point2Pairs ab_pairs;
  Point2Pair ab1(make_pair(a1, b1));
  Point2Pair ab2(make_pair(a2, b2));
  Point2Pair ab3(make_pair(a3, b3));
  ab_pairs += ab1, ab2, ab3;

  boost::optional<Pose2> actual = align(correspondences);
  EXPECT(assert_equal(expected, *actual));
  boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
  EXPECT(assert_equal(expected, *aTb));
}

namespace {

@ -772,26 +765,27 @@ namespace {
/* ************************************************************************* */
struct Triangle { size_t i_, j_, k_;};

boost::optional<Pose2> align2(const Point2Vector& ps, const Point2Vector& qs,
boost::optional<Pose2> align2(const Point2Vector& as, const Point2Vector& bs,
    const pair<Triangle, Triangle>& trianglePair) {
  const Triangle& t1 = trianglePair.first, t2 = trianglePair.second;
  vector<Point2Pair> correspondences;
  correspondences += make_pair(ps[t1.i_],qs[t2.i_]), make_pair(ps[t1.j_],qs[t2.j_]), make_pair(ps[t1.k_],qs[t2.k_]);
  return align(correspondences);
  Point2Pairs ab_pairs = {{as[t1.i_], bs[t2.i_]},
                          {as[t1.j_], bs[t2.j_]},
                          {as[t1.k_], bs[t2.k_]}};
  return Pose2::Align(ab_pairs);
}
}

TEST(Pose2, align_4) {
  using namespace align_3;

  Point2Vector ps,qs;
  ps += p1, p2, p3;
  qs += q3, q1, q2; // note in 3,1,2 order !
  Point2Vector as, bs;
  as += a1, a2, a3;
  bs += b3, b1, b2; // note in 3,1,2 order !

  Triangle t1; t1.i_=0; t1.j_=1; t1.k_=2;
  Triangle t2; t2.i_=1; t2.j_=2; t2.k_=0;

  boost::optional<Pose2> actual = align2(ps, qs, make_pair(t1,t2));
  boost::optional<Pose2> actual = align2(as, bs, {t1, t2});
  EXPECT(assert_equal(expected, *actual));
}

@ -0,0 +1,66 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testSimilarity2.cpp
 * @brief Unit tests for Similarity2 class
 * @author Varun Agrawal
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/base/testLie.h>
#include <gtsam/geometry/Similarity2.h>

#include <functional>

using namespace std::placeholders;
using namespace gtsam;
using namespace std;

GTSAM_CONCEPT_TESTABLE_INST(Similarity2)

static const Point2 P(0.2, 0.7);
static const Rot2 R = Rot2::fromAngle(0.3);
static const double s = 4;

const double degree = M_PI / 180;

//******************************************************************************
TEST(Similarity2, Concepts) {
  BOOST_CONCEPT_ASSERT((IsGroup<Similarity2>));
  BOOST_CONCEPT_ASSERT((IsManifold<Similarity2>));
  BOOST_CONCEPT_ASSERT((IsLieGroup<Similarity2>));
}

//******************************************************************************
TEST(Similarity2, Constructors) {
  Similarity2 sim2_Construct1;
  Similarity2 sim2_Construct2(s);
  Similarity2 sim2_Construct3(R, P, s);
  Similarity2 sim2_Construct4(R.matrix(), P, s);
}

//******************************************************************************
TEST(Similarity2, Getters) {
  Similarity2 sim2_default;
  EXPECT(assert_equal(Rot2(), sim2_default.rotation()));
  EXPECT(assert_equal(Point2(0, 0), sim2_default.translation()));
  EXPECT_DOUBLES_EQUAL(1.0, sim2_default.scale(), 1e-9);
}

//******************************************************************************
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
//******************************************************************************

@ -458,18 +458,18 @@ TEST(Similarity3, Optimization2) {
  Values result;
  result = LevenbergMarquardtOptimizer(graph, initial).optimize();
  //result.print("Optimized Estimate\n");
  Pose3 p1, p2, p3, p4, p5;
  p1 = Pose3(result.at<Similarity3>(X(1)));
  p2 = Pose3(result.at<Similarity3>(X(2)));
  p3 = Pose3(result.at<Similarity3>(X(3)));
  p4 = Pose3(result.at<Similarity3>(X(4)));
  p5 = Pose3(result.at<Similarity3>(X(5)));
  Similarity3 p1, p2, p3, p4, p5;
  p1 = result.at<Similarity3>(X(1));
  p2 = result.at<Similarity3>(X(2));
  p3 = result.at<Similarity3>(X(3));
  p4 = result.at<Similarity3>(X(4));
  p5 = result.at<Similarity3>(X(5));

  //p1.print("Pose1");
  //p2.print("Pose2");
  //p3.print("Pose3");
  //p4.print("Pose4");
  //p5.print("Pose5");
  //p1.print("Similarity1");
  //p2.print("Similarity2");
  //p3.print("Similarity3");
  //p4.print("Similarity4");
  //p5.print("Similarity5");

  Similarity3 expected(0.7);
  EXPECT(assert_equal(expected, result.at<Similarity3>(X(5)), 0.4));

@ -23,6 +23,7 @@
#include <gtsam/geometry/Cal3Fisheye.h>
#include <gtsam/geometry/Cal3Unified.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Cal3DS2.h>
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/SphericalCamera.h>

@ -510,18 +511,18 @@ private:
};

/**
 * TriangulationResult is an optional point, along with the reasons why it is invalid.
 * TriangulationResult is an optional point, along with the reasons why it is
 * invalid.
 */
class TriangulationResult : public boost::optional<Point3> {
  enum Status {
    VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT
  };
  Status status_;
  TriangulationResult(Status s) :
      status_(s) {
  }
 public:
  enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT };
  Status status;

 private:
  TriangulationResult(Status s) : status(s) {}

 public:
  /**
   * Default constructor, only for serialization
   */

@ -530,54 +531,38 @@ public:
  /**
   * Constructor
   */
  TriangulationResult(const Point3& p) :
      status_(VALID) {
    reset(p);
  }
  TriangulationResult(const Point3& p) : status(VALID) { reset(p); }
  static TriangulationResult Degenerate() {
    return TriangulationResult(DEGENERATE);
  }
  static TriangulationResult Outlier() {
    return TriangulationResult(OUTLIER);
  }
  static TriangulationResult Outlier() { return TriangulationResult(OUTLIER); }
  static TriangulationResult FarPoint() {
    return TriangulationResult(FAR_POINT);
  }
  static TriangulationResult BehindCamera() {
    return TriangulationResult(BEHIND_CAMERA);
  }
  bool valid() const {
    return status_ == VALID;
  }
  bool degenerate() const {
    return status_ == DEGENERATE;
  }
  bool outlier() const {
    return status_ == OUTLIER;
  }
  bool farPoint() const {
    return status_ == FAR_POINT;
  }
  bool behindCamera() const {
    return status_ == BEHIND_CAMERA;
  }
  bool valid() const { return status == VALID; }
  bool degenerate() const { return status == DEGENERATE; }
  bool outlier() const { return status == OUTLIER; }
  bool farPoint() const { return status == FAR_POINT; }
  bool behindCamera() const { return status == BEHIND_CAMERA; }
  // stream to output
  friend std::ostream& operator<<(std::ostream& os,
                                  const TriangulationResult& result) {
    if (result)
      os << "point = " << *result << std::endl;
    else
      os << "no point, status = " << result.status_ << std::endl;
      os << "no point, status = " << result.status << std::endl;
    return os;
  }

 private:
  /// Serialization function
  friend class boost::serialization::access;
  template <class ARCHIVE>
  void serialize(ARCHIVE& ar, const unsigned int version) {
    ar & BOOST_SERIALIZATION_NVP(status_);
    ar& BOOST_SERIALIZATION_NVP(status);
  }
};

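With status now public, a caller can branch on the failure reason directly. A self-contained sketch (the two-camera geometry is invented for illustration):

#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>

using namespace gtsam;

int main() {
  // Two cameras a metre apart, both looking along +z at a point 5 m away.
  CameraSet<PinholeCamera<Cal3_S2>> cameras;
  Cal3_S2 K(500, 500, 0, 320, 240);
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(0, 0, 0)), K));
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(1, 0, 0)), K));

  Point3 landmark(0.5, 0.0, 5.0);
  Point2Vector measurements;
  for (const auto& camera : cameras)
    measurements.push_back(camera.project(landmark));

  TriangulationParameters params;
  TriangulationResult result = triangulateSafe(cameras, measurements, params);
  if (result) {
    Point3 recovered = *result;  // the optional base holds the point when VALID
  } else if (result.degenerate() || result.behindCamera()) {
    // Handle failure using the now-public status.
  }
}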
@ -644,6 +629,7 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
// Vector of Cameras - used by the Python/MATLAB wrapper
using CameraSetCal3Bundler = CameraSet<PinholeCamera<Cal3Bundler>>;
using CameraSetCal3_S2 = CameraSet<PinholeCamera<Cal3_S2>>;
using CameraSetCal3DS2 = CameraSet<PinholeCamera<Cal3DS2>>;
using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>;
using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>;
using CameraSetSpherical = CameraSet<SphericalCamera>;

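These typedefs are what the wrapped triangulatePoint3 overloads in the interface file bind to. A minimal native sketch with a shared calibration (assuming the templated C++ overload that takes a boost::shared_ptr calibration):

#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>
#include <boost/make_shared.hpp>

using namespace gtsam;

int main() {
  auto sharedCal = boost::make_shared<Cal3_S2>(500, 500, 0, 320, 240);
  std::vector<Pose3> poses{Pose3(), Pose3(Rot3(), Point3(1, 0, 0))};

  Point3 landmark(0.5, 0.0, 5.0);
  Point2Vector measurements;
  for (const Pose3& pose : poses) {
    PinholeCamera<Cal3_S2> camera(pose, *sharedCal);
    measurements.push_back(camera.project(landmark));
  }

  // rank_tol = 1e-9; optimize = true refines the DLT result nonlinearly.
  Point3 estimate =
      triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
}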
@ -53,8 +53,9 @@ void BayesNet<CONDITIONAL>::dot(std::ostream& os,
    auto frontals = conditional->frontals();
    const Key me = frontals.front();
    auto parents = conditional->parents();
    for (const Key& p : parents)
      os << " var" << keyFormatter(p) << "->var" << keyFormatter(me) << "\n";
    for (const Key& p : parents) {
      os << " var" << p << "->var" << me << "\n";
    }
  }

  os << "}";

@ -15,6 +15,10 @@
#include <gtsam/base/timing.h>
#include <gtsam/base/treeTraversal-inst.h>

#ifdef GTSAM_USE_TBB
#include <mutex>
#endif

namespace gtsam {

/* ************************************************************************* */

@ -120,12 +124,25 @@ struct EliminationData {
  size_t myIndexInParent;
  FastVector<sharedFactor> childFactors;
  boost::shared_ptr<BTNode> bayesTreeNode;
#ifdef GTSAM_USE_TBB
  boost::shared_ptr<std::mutex> writeLock;
#endif

  EliminationData(EliminationData* _parentData, size_t nChildren) :
      parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>()) {
      parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>())
#ifdef GTSAM_USE_TBB
      , writeLock(boost::make_shared<std::mutex>())
#endif
  {
    if (parentData) {
#ifdef GTSAM_USE_TBB
      parentData->writeLock->lock();
#endif
      myIndexInParent = parentData->childFactors.size();
      parentData->childFactors.push_back(sharedFactor());
#ifdef GTSAM_USE_TBB
      parentData->writeLock->unlock();
#endif
    } else {
      myIndexInParent = 0;
    }

@ -196,8 +213,15 @@ struct EliminationData {
      nodesIndex_.insert(std::make_pair(j, myData.bayesTreeNode));

      // Store remaining factor in parent's gathered factors
      if (!eliminationResult.second->empty())
      if (!eliminationResult.second->empty()) {
#ifdef GTSAM_USE_TBB
        myData.parentData->writeLock->lock();
#endif
        myData.parentData->childFactors[myData.myIndexInParent] = eliminationResult.second;
#ifdef GTSAM_USE_TBB
        myData.parentData->writeLock->unlock();
#endif
      }
    }
  };
};

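The reason for the new per-node mutex: under TBB, sibling subtrees are eliminated concurrently, and each constructs an EliminationData against the same parent, so the push_back into parentData->childFactors is a data race unless serialized. A standalone sketch of the same guarded-append pattern (illustrative only, not GTSAM code):

#include <memory>
#include <mutex>
#include <thread>
#include <vector>

struct Node {
  std::vector<int> childSlots;
  std::shared_ptr<std::mutex> writeLock = std::make_shared<std::mutex>();

  // Mirrors EliminationData's constructor: reserve a slot under the lock.
  size_t reserveSlot() {
    std::lock_guard<std::mutex> guard(*writeLock);
    childSlots.push_back(0);  // placeholder, filled in later
    return childSlots.size() - 1;
  }
};

int main() {
  Node parent;
  std::thread a([&] { parent.reserveSlot(); });
  std::thread b([&] { parent.reserveSlot(); });
  a.join();
  b.join();
  // Without the lock, the two push_backs could corrupt the vector.
}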
@ -43,7 +43,7 @@ void DotWriter::drawVariable(Key key, const KeyFormatter& keyFormatter,
                             const boost::optional<Vector2>& position,
                             ostream* os) const {
  // Label the node with the label from the KeyFormatter
  *os << " var" << keyFormatter(key) << "[label=\"" << keyFormatter(key)
  *os << " var" << key << "[label=\"" << keyFormatter(key)
      << "\"";
  if (position) {
    *os << ", pos=\"" << position->x() << "," << position->y() << "!\"";

@ -65,13 +65,13 @@ void DotWriter::DrawFactor(size_t i, const boost::optional<Vector2>& position,

static void ConnectVariables(Key key1, Key key2,
                             const KeyFormatter& keyFormatter, ostream* os) {
  *os << " var" << keyFormatter(key1) << "--"
      << "var" << keyFormatter(key2) << ";\n";
  *os << " var" << key1 << "--"
      << "var" << key2 << ";\n";
}

static void ConnectVariableFactor(Key key, const KeyFormatter& keyFormatter,
                                  size_t i, ostream* os) {
  *os << " var" << keyFormatter(key) << "--"
  *os << " var" << key << "--"
      << "factor" << i << ";\n";
}

@ -91,13 +91,18 @@ namespace gtsam {
  void GaussianConditional::print(const string &s, const KeyFormatter& formatter) const {
    cout << s << " p(";
    for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) {
      cout << (boost::format("%1%")%(formatter(*it))).str() << " ";
      cout << (boost::format("%1%") % (formatter(*it))).str()
           << (nrFrontals() > 1 ? " " : "");
    }

    if (nrParents()) {
      cout << " |";
      for (const_iterator it = beginParents(); it != endParents(); ++it) {
        cout << " " << (boost::format("%1%") % (formatter(*it))).str();
      }
    }
    cout << ")" << endl;

    cout << formatMatrixIndented("  R = ", R()) << endl;
    for (const_iterator it = beginParents() ; it != endParents() ; ++it) {
      cout << formatMatrixIndented((boost::format("  S[%1%] = ")%(formatter(*it))).str(), getA(it))

@ -109,7 +109,8 @@ namespace gtsam {
    /// @{

    /** print */
    void print(const std::string& = "GaussianConditional",
    void print(
        const std::string& = "GaussianConditional",
        const KeyFormatter& formatter = DefaultKeyFormatter) const override;

    /** equals function */

@ -54,23 +54,31 @@ namespace noiseModel {
// clang-format on
namespace mEstimator {

//---------------------------------------------------------------------------------------

/**
 * Pure virtual class for all robust error function classes.
 *
 * It provides the machinery for block vs scalar reweighting strategies, in
 * addition to defining the interface of derived classes.
 */
class GTSAM_EXPORT Base {
 public:
  /** the rows can be weighted independently according to the error
   * or uniformly with the norm of the right hand side */
  enum ReweightScheme { Scalar, Block };
  typedef boost::shared_ptr<Base> shared_ptr;

 protected:
  /** the rows can be weighted independently according to the error
   * or uniformly with the norm of the right hand side */
  /// Strategy for reweighting \sa ReweightScheme
  ReweightScheme reweight_;

 public:
  Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
  virtual ~Base() {}

  /*
  /// Returns the reweight scheme, as explained in ReweightScheme
  ReweightScheme reweightScheme() const { return reweight_; }

  /**
   * This method is responsible for returning the total penalty for a given
   * amount of error. For example, this method is responsible for implementing
   * the quadratic function for an L2 penalty, the absolute value function for

@ -80,16 +88,20 @@ class GTSAM_EXPORT Base {
   * error vector, then it prevents implementations of asymmetric loss
   * functions. It would be better for this function to accept the vector and
   * internally call the norm if necessary.
   *
   * This returns \rho(x) in \ref mEstimator
   */
  virtual double loss(double distance) const { return 0; };
  virtual double loss(double distance) const { return 0; }

  /*
  /**
   * This method is responsible for returning the weight function for a given
   * amount of error. The weight function is related to the analytic derivative
   * of the loss function. See
   * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
   * for details. This method is required when optimizing cost functions with
   * robust penalties using iteratively re-weighted least squares.
   *
   * This returns w(x) in \ref mEstimator
   */
  virtual double weight(double distance) const = 0;

@ -124,7 +136,15 @@ class GTSAM_EXPORT Base {
  }
};

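The Base interface above is all a custom robust kernel needs: override loss (rho) and weight (w = phi/x), plus the print/equals pair the derived classes below also implement. A hedged sketch of a hypothetical kernel (class name and constant are invented; the formulas happen to be the Fair loss documented below):

#include <gtsam/linear/LossFunctions.h>
#include <cmath>
#include <string>

namespace example {

// Hypothetical "fair-like" kernel; not part of GTSAM.
class MyKernel : public gtsam::noiseModel::mEstimator::Base {
  double c_;

 public:
  MyKernel(double c, ReweightScheme reweight = Block)
      : Base(reweight), c_(c) {}

  // rho(x): total penalty for a scalar error x.
  double loss(double distance) const override {
    const double r = std::abs(distance) / c_;
    return c_ * c_ * (r - std::log(1.0 + r));
  }

  // w(x) = phi(x)/x, consumed by iteratively re-weighted least squares.
  double weight(double distance) const override {
    return 1.0 / (1.0 + std::abs(distance) / c_);
  }

  void print(const std::string& s) const override { /* omitted */ }
  bool equals(const Base& expected, double /*tol*/ = 1e-8) const override {
    return dynamic_cast<const MyKernel*>(&expected) != nullptr;
  }
};

}  // namespace example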
/// Null class should behave as Gaussian
/** "Null" robust loss function, equivalent to a Gaussian pdf noise model, or
 * plain least-squares (non-robust).
 *
 * This model has no additional parameters.
 *
 * - Loss \rho(x) = 0.5 x²
 * - Derivative \phi(x) = x
 * - Weight w(x) = \phi(x)/x = 1
 */
class GTSAM_EXPORT Null : public Base {
 public:
  typedef boost::shared_ptr<Null> shared_ptr;

@ -146,7 +166,14 @@ class GTSAM_EXPORT Null : public Base {
  }
};

/// Fair implements the "Fair" robust error model (Zhang97ivc)
/** Implementation of the "Fair" robust error model (Zhang97ivc)
 *
 * This model has a scalar parameter "c".
 *
 * - Loss \rho(x) = c² (|x|/c - log(1+|x|/c))
 * - Derivative \phi(x) = x/(1+|x|/c)
 * - Weight w(x) = \phi(x)/x = 1/(1+|x|/c)
 */
class GTSAM_EXPORT Fair : public Base {
 protected:
  double c_;

@ -160,6 +187,7 @@ class GTSAM_EXPORT Fair : public Base {
  void print(const std::string &s) const override;
  bool equals(const Base &expected, double tol = 1e-8) const override;
  static shared_ptr Create(double c, const ReweightScheme reweight = Block);
  double modelParameter() const { return c_; }

 private:
  /** Serialization function */

@ -171,7 +199,14 @@ class GTSAM_EXPORT Fair : public Base {
  }
};

/// Huber implements the "Huber" robust error model (Zhang97ivc)
/** The "Huber" robust error model (Zhang97ivc).
 *
 * This model has a scalar parameter "k".
 *
 * - Loss \rho(x) = 0.5 x² if |x|<k, 0.5 k² + k(|x|-k) otherwise
 * - Derivative \phi(x) = x if |x|<k, k sgn(x) otherwise
 * - Weight w(x) = \phi(x)/x = 1 if |x|<k, k/|x| otherwise
 */
class GTSAM_EXPORT Huber : public Base {
 protected:
  double k_;

@ -185,6 +220,7 @@ class GTSAM_EXPORT Huber : public Base {
  void print(const std::string &s) const override;
  bool equals(const Base &expected, double tol = 1e-8) const override;
  static shared_ptr Create(double k, const ReweightScheme reweight = Block);
  double modelParameter() const { return k_; }

 private:
  /** Serialization function */

@ -196,12 +232,19 @@ class GTSAM_EXPORT Huber : public Base {
  }
};

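A quick numeric check of the Huber formulas above (a sketch; 1.345 is the textbook tuning constant, chosen here only for illustration):

#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

int main() {
  using gtsam::noiseModel::mEstimator::Huber;
  const double k = 1.345;
  auto huber = Huber::Create(k);

  // Inside the quadratic region: w(x) = 1, rho(x) = 0.5 x^2.
  assert(std::abs(huber->weight(1.0) - 1.0) < 1e-12);
  assert(std::abs(huber->loss(1.0) - 0.5) < 1e-12);

  // Outside: w(x) = k/|x|, rho(x) = 0.5 k^2 + k(|x| - k).
  assert(std::abs(huber->weight(2.0) - k / 2.0) < 1e-12);
  assert(std::abs(huber->loss(2.0) - (0.5 * k * k + k * (2.0 - k))) < 1e-12);
}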
||||
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed
|
||||
/// by:
|
||||
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
|
||||
/// Information Technology, Karlsruhe, Germany.
|
||||
/// oberlaender@fzi.de
|
||||
/// Thanks Jan!
|
||||
/** Implementation of the "Cauchy" robust error model (Lee2013IROS).
|
||||
* Contributed by:
|
||||
* Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
|
||||
* Information Technology, Karlsruhe, Germany.
|
||||
* oberlaender@fzi.de
|
||||
* Thanks Jan!
|
||||
*
|
||||
* This model has a scalar parameter "k".
|
||||
*
|
||||
* - Loss \rho(x) = 0.5 k² log(1+x²/k²)
|
||||
* - Derivative \phi(x) = (k²x)/(x²+k²)
|
||||
* - Weight w(x) = \phi(x)/x = k²/(x²+k²)
|
||||
*/
|
||||
class GTSAM_EXPORT Cauchy : public Base {
|
||||
protected:
|
||||
double k_, ksquared_;
|
||||
|
@ -215,6 +258,7 @@ class GTSAM_EXPORT Cauchy : public Base {
|
|||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return k_; }
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
@ -223,10 +267,18 @@ class GTSAM_EXPORT Cauchy : public Base {
|
|||
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar &BOOST_SERIALIZATION_NVP(k_);
|
||||
ar &BOOST_SERIALIZATION_NVP(ksquared_);
|
||||
}
|
||||
};
|
||||
|
||||
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
|
||||
/** Implementation of the "Tukey" robust error model (Zhang97ivc).
|
||||
*
|
||||
* This model has a scalar parameter "c".
|
||||
*
|
||||
* - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|<c, c²/6 otherwise
|
||||
* - Derivative \phi(x) = x(1-x²/c²)² if |x|<c, 0 otherwise
|
||||
* - Weight w(x) = \phi(x)/x = (1-x²/c²)² if |x|<c, 0 otherwise
|
||||
*/
|
||||
class GTSAM_EXPORT Tukey : public Base {
|
||||
protected:
|
||||
double c_, csquared_;
|
||||
|
@ -240,6 +292,7 @@ class GTSAM_EXPORT Tukey : public Base {
|
|||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return c_; }
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
@ -251,7 +304,14 @@ class GTSAM_EXPORT Tukey : public Base {
|
|||
}
|
||||
};
|
||||
|
||||
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
|
||||
/** Implementation of the "Welsch" robust error model (Zhang97ivc).
|
||||
*
|
||||
* This model has a scalar parameter "c".
|
||||
*
|
||||
* - Loss \rho(x) = -0.5 c² (exp(-x²/c²) - 1)
|
||||
* - Derivative \phi(x) = x exp(-x²/c²)
|
||||
* - Weight w(x) = \phi(x)/x = exp(-x²/c²)
|
||||
*/
|
||||
class GTSAM_EXPORT Welsch : public Base {
|
||||
protected:
|
||||
double c_, csquared_;
|
||||
|
@ -265,6 +325,7 @@ class GTSAM_EXPORT Welsch : public Base {
|
|||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return c_; }
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
@ -273,15 +334,20 @@ class GTSAM_EXPORT Welsch : public Base {
|
|||
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar &BOOST_SERIALIZATION_NVP(c_);
|
||||
ar &BOOST_SERIALIZATION_NVP(csquared_);
|
||||
}
|
||||
};
|
||||
|
||||
/// GemanMcClure implements the "Geman-McClure" robust error model
|
||||
/// (Zhang97ivc).
|
||||
///
|
||||
/// Note that Geman-McClure weight function uses the parameter c == 1.0,
|
||||
/// but here it's allowed to use different values, so we actually have
|
||||
/// the generalized Geman-McClure from (Agarwal15phd).
|
||||
/** Implementation of the "Geman-McClure" robust error model (Zhang97ivc).
|
||||
*
|
||||
* Note that Geman-McClure weight function uses the parameter c == 1.0,
|
||||
* but here it's allowed to use different values, so we actually have
|
||||
* the generalized Geman-McClure from (Agarwal15phd).
|
||||
*
|
||||
* - Loss \rho(x) = 0.5 (c²x²)/(c²+x²)
|
||||
* - Derivative \phi(x) = xc⁴/(c²+x²)²
|
||||
* - Weight w(x) = \phi(x)/x = c⁴/(c²+x²)²
|
||||
*/
|
||||
class GTSAM_EXPORT GemanMcClure : public Base {
|
||||
public:
|
||||
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
|
||||
|
@ -293,6 +359,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
|
|||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return c_; }
|
||||
|
||||
protected:
|
||||
double c_;
|
||||
|
@ -307,11 +374,18 @@ class GTSAM_EXPORT GemanMcClure : public Base {
|
|||
}
|
||||
};

/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// Under the special condition of the parameter c == 1.0 and not
/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
/** DCS implements the Dynamic Covariance Scaling robust error model
 * from the paper Robust Map Optimization (Agarwal13icra).
 *
 * Under the special condition of the parameter c == 1.0 and not
 * forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
 *
 * This model has a scalar parameter "c" (with "units" of squared error).
 *
 * - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x")
 * - Derivative \phi(x) = 2c²x/(x²+c)²
 * - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise
 */
class GTSAM_EXPORT DCS : public Base {
 public:
  typedef boost::shared_ptr<DCS> shared_ptr;

@ -323,6 +397,7 @@ class GTSAM_EXPORT DCS : public Base {
  void print(const std::string &s) const override;
  bool equals(const Base &expected, double tol = 1e-8) const override;
  static shared_ptr Create(double k, const ReweightScheme reweight = Block);
  double modelParameter() const { return c_; }

 protected:
  double c_;

@ -337,12 +412,19 @@ class GTSAM_EXPORT DCS : public Base {
  }
};
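
A sketch that transcribes the documented piecewise weight literally, for a scalar residual (this follows the comment above; the DCS implementation in LossFunctions.cpp remains the authority on the exact constants):

    #include <cstdio>

    // w(x) = 2c^2/(x^2+c)^2 if x^2 > c, and 1 otherwise, per the comment above.
    double dcsWeight(double x, double c) {
      const double x2 = x * x;
      if (x2 <= c) return 1.0;
      const double denom = x2 + c;
      return 2.0 * c * c / (denom * denom);
    }

    int main() {
      for (double x : {0.0, 1.0, 2.0, 5.0})
        std::printf("x=%4.1f  w=%8.6f\n", x, dcsWeight(x, 1.0));
    }
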

/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
/// width 2*k, centered at the origin. The resulting penalty within the dead
/// zone is always zero, and grows quadratically outside the dead zone. In this
/// sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
/// robust to outliers. This penalty can be used to create barrier functions in
/// a general way.
/** L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
 * width 2*k, centered at the origin. The resulting penalty within the dead
 * zone is always zero, and grows quadratically outside the dead zone. In this
 * sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
 * robust to outliers. This penalty can be used to create barrier functions in
 * a general way.
 *
 * This model has a scalar parameter "k".
 *
 * - Loss \rho(x) = 0 if |x|<k, 0.5(k-|x|)² otherwise
 * - Derivative \phi(x) = 0 if |x|<k, (-k+x) if x>k, (k+x) if x<-k
 * - Weight w(x) = \phi(x)/x = 0 if |x|<k, (-k+x)/x if x>k, (k+x)/x if x<-k
 */
class GTSAM_EXPORT L2WithDeadZone : public Base {
 protected:
  double k_;

@ -356,6 +438,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
  void print(const std::string &s) const override;
  bool equals(const Base &expected, double tol = 1e-8) const override;
  static shared_ptr Create(double k, const ReweightScheme reweight = Block);
  double modelParameter() const { return k_; }

 private:
  /** Serialization function */
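
A sketch of the piecewise loss and derivative above, for a scalar residual (plain functions, assuming k > 0; not the GTSAM implementation):

    #include <cmath>
    #include <cstdio>

    // rho(x) = 0 inside the dead zone, 0.5 (k - |x|)^2 outside.
    double deadZoneLoss(double x, double k) {
      const double ax = std::abs(x);
      return ax < k ? 0.0 : 0.5 * (k - ax) * (k - ax);
    }

    // phi(x) = 0 inside the dead zone, x - k above it, x + k below it.
    double deadZoneDerivative(double x, double k) {
      if (std::abs(x) < k) return 0.0;
      return x > k ? x - k : x + k;
    }

    int main() {
      const double k = 1.0;
      for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0})
        std::printf("x=%5.1f  rho=%6.3f  phi=%6.3f\n",
                    x, deadZoneLoss(x, k), deadZoneDerivative(x, k));
    }
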
@ -337,7 +337,6 @@ vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
  DSFVector dsf(n);

  size_t count = 0;
  double sum = 0.0;
  for (const size_t index : sortedIndices) {
    const GaussianFactor &gf = *gfg[index];
    const auto keys = gf.keys();

@ -347,7 +346,6 @@ vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
    if (dsf.find(u) != dsf.find(v)) {
      dsf.merge(u, v);
      treeIndices.push_back(index);
      sum += weights[index];
      if (++count == n - 1) break;
    }
  }
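
For readers unfamiliar with the pattern, the loop above is Kruskal's algorithm: edges are visited in sorted order, a disjoint-set forest decides whether an edge joins two components, and the early exit fires once the tree has n-1 edges. A self-contained sketch with a toy union-find (gtsam::DSFVector offers the same find/merge vocabulary):

    #include <cstdio>
    #include <numeric>
    #include <utility>
    #include <vector>

    // Toy disjoint-set forest with path compression.
    struct DSF {
      std::vector<size_t> parent;
      explicit DSF(size_t n) : parent(n) {
        std::iota(parent.begin(), parent.end(), 0);
      }
      size_t find(size_t i) {
        return parent[i] == i ? i : parent[i] = find(parent[i]);
      }
      void merge(size_t i, size_t j) { parent[find(i)] = find(j); }
    };

    int main() {
      const size_t n = 4;
      // Edges already sorted by weight, as sortedIndices is above.
      const std::vector<std::pair<size_t, size_t>> edges = {
          {0, 1}, {1, 2}, {0, 2}, {2, 3}};
      DSF dsf(n);
      size_t count = 0;
      for (const auto& e : edges) {
        if (dsf.find(e.first) != dsf.find(e.second)) {
          dsf.merge(e.first, e.second);
          std::printf("tree edge (%zu,%zu)\n", e.first, e.second);
          if (++count == n - 1) break;  // spanning tree complete
        }  // the cycle edge (0,2) is skipped by the find() test
      }
    }
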
@ -671,6 +671,10 @@ virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters {
  DummyPreconditionerParameters();
};

virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParameters {
  BlockJacobiPreconditionerParameters();
};

#include <gtsam/linear/PCGSolver.h>
virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
  PCGSolverParameters();
@ -404,13 +404,23 @@ TEST(GaussianConditional, Print) {
  const Vector2 b(20, 40);
  const double sigma = 3;

  std::string s = "GaussianConditional";
  GaussianConditional conditional(X(0), b, Matrix2::Identity(),
                                  noiseModel::Isotropic::Sigma(2, sigma));

  auto conditional =
  // Test printing for no parents.
  std::string expected =
      "GaussianConditional p(x0)\n"
      " R = [ 1 0 ]\n"
      " [ 0 1 ]\n"
      " d = [ 20 40 ]\n"
      "isotropic dim=2 sigma=3\n";
  EXPECT(assert_print_equal(expected, conditional, "GaussianConditional"));

  auto conditional1 =
      GaussianConditional::FromMeanAndStddev(X(0), A1, X(1), b, sigma);

  // Test printing for single parent.
  std::string expected =
  std::string expected1 =
      "GaussianConditional p(x0 | x1)\n"
      " R = [ 1 0 ]\n"
      " [ 0 1 ]\n"

@ -418,7 +428,7 @@ TEST(GaussianConditional, Print) {
      " [ -3 -4 ]\n"
      " d = [ 20 40 ]\n"
      "isotropic dim=2 sigma=3\n";
  EXPECT(assert_print_equal(expected, conditional, s));
  EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional"));

  // Test printing for multiple parents.
  auto conditional2 = GaussianConditional::FromMeanAndStddev(X(0), A1, Y(0), A2,

@ -433,7 +443,7 @@ TEST(GaussianConditional, Print) {
      " [ -7 -8 ]\n"
      " d = [ 20 40 ]\n"
      "isotropic dim=2 sigma=3\n";
  EXPECT(assert_print_equal(expected2, conditional2, s));
  EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional"));
}

/* ************************************************************************* */
@ -90,7 +90,11 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation

  /**
   * Add a single Gyroscope measurement to the preintegration.
   * @param measureOmedga Measured angular velocity (in body frame)
   * Measurements are taken to be in the sensor
   * frame and conversion to the body frame is handled by `body_P_sensor` in
   * `PreintegratedRotationParams` (if provided).
   *
   * @param measuredOmega Measured angular velocity (as given by the sensor)
   * @param deltaT Time step
   */
  void integrateMeasurement(const Vector3& measuredOmega, double deltaT);
@ -208,8 +208,11 @@ public:

  /**
   * Add a single IMU measurement to the preintegration.
   * @param measuredAcc Measured acceleration (in body frame, as given by the
   * sensor)
   * Both accelerometer and gyroscope measurements are taken to be in the sensor
   * frame and conversion to the body frame is handled by `body_P_sensor` in
   * `PreintegrationParams`.
   *
   * @param measuredAcc Measured acceleration (as given by the sensor)
   * @param measuredOmega Measured angular velocity (as given by the sensor)
   * @param dt Time interval between two consecutive IMU measurements
   */
@ -121,7 +121,11 @@ public:

  /**
   * Add a single IMU measurement to the preintegration.
   * @param measuredAcc Measured acceleration (in body frame, as given by the sensor)
   * Both accelerometer and gyroscope measurements are taken to be in the sensor
   * frame and conversion to the body frame is handled by `body_P_sensor` in
   * `PreintegrationParams`.
   *
   * @param measuredAcc Measured acceleration (as given by the sensor)
   * @param measuredOmega Measured angular velocity (as given by the sensor)
   * @param dt Time interval between this and the last IMU measurement
   */
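
The clarified contract above (raw measurements in the sensor frame, with `body_P_sensor` doing the conversion) looks roughly like this in use. A hedged sketch: the gravity constant and the sensor extrinsics are made up for illustration:

    #include <gtsam/geometry/Pose3.h>
    #include <gtsam/navigation/ImuFactor.h>

    using namespace gtsam;

    int main() {
      // Params for a Z-up navigation frame; 9.81 is the assumed gravity magnitude.
      auto params = PreintegrationParams::MakeSharedU(9.81);
      // Hypothetical extrinsics: IMU mounted 10 cm forward of the body origin.
      params->body_P_sensor = Pose3(Rot3(), Point3(0.1, 0.0, 0.0));

      PreintegratedImuMeasurements pim(params);
      // Sensor-frame measurements; conversion to the body frame is internal.
      const Vector3 measuredAcc(0.0, 0.0, 9.81);
      const Vector3 measuredOmega(0.0, 0.0, 0.0);
      pim.integrateMeasurement(measuredAcc, measuredOmega, /*dt=*/0.01);
    }
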
@ -27,7 +27,6 @@
#include <GeographicLib/Config.h>
#include <GeographicLib/LocalCartesian.hpp>

using namespace std::placeholders;
using namespace std;
using namespace gtsam;
using namespace GeographicLib;
@ -26,7 +26,6 @@

#include <GeographicLib/LocalCartesian.hpp>

using namespace std::placeholders;
using namespace std;
using namespace gtsam;
using namespace GeographicLib;

@ -95,7 +94,7 @@ TEST( MagFactor, Factors ) {
      (std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),//
      H2, 1e-7));

  // MagFactor2
  // MagFactor3
  MagFactor3 f3(1, 2, 3, measured, nRb, model);
  EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
  EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //
@ -94,7 +94,6 @@ namespace gtsam {
    Vector evaluateError(const T& x, boost::optional<Matrix&> H = boost::none) const override {
      if (H) (*H) = Matrix::Identity(traits<T>::GetDimension(x),traits<T>::GetDimension(x));
      // manifold equivalent of z-x -> Local(x,z)
      // TODO(ASL) Add Jacobians.
      return -traits<T>::Local(x, prior_);
    }

@ -279,10 +279,11 @@ namespace gtsam {
  template <typename ValueType>
  struct handle {
    ValueType operator()(Key j, const Value* const pointer) {
      try {
      auto ptr = dynamic_cast<const GenericValue<ValueType>*>(pointer);
      if (ptr) {
        // value returns a const ValueType&, and the return makes a copy !!!!!
        return dynamic_cast<const GenericValue<ValueType>&>(*pointer).value();
      } catch (std::bad_cast&) {
        return ptr->value();
      } else {
        throw ValuesIncorrectType(j, typeid(*pointer), typeid(ValueType));
      }
    }

@ -294,11 +295,12 @@ namespace gtsam {
  // Handle dynamic matrices
  template <int M, int N>
  struct handle_matrix<Eigen::Matrix<double, M, N>, true> {
    Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
      try {
    inline Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
      auto ptr = dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>*>(pointer);
      if (ptr) {
        // value returns a const Matrix&, and the return makes a copy !!!!!
        return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>&>(*pointer).value();
      } catch (std::bad_cast&) {
        return ptr->value();
      } else {
        // If a fixed matrix was stored, we end up here as well.
        throw ValuesIncorrectType(j, typeid(*pointer), typeid(Eigen::Matrix<double, M, N>));
      }

@ -308,16 +310,18 @@ namespace gtsam {
  // Handle fixed matrices
  template <int M, int N>
  struct handle_matrix<Eigen::Matrix<double, M, N>, false> {
    Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
      try {
    inline Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
      auto ptr = dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>*>(pointer);
      if (ptr) {
        // value returns a const MatrixMN&, and the return makes a copy !!!!!
        return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>&>(*pointer).value();
      } catch (std::bad_cast&) {
        return ptr->value();
      } else {
        Matrix A;
        try {
          // Check if a dynamic matrix was stored
          A = handle_matrix<Eigen::MatrixXd, true>()(j, pointer);  // will throw if not....
        } catch (const ValuesIncorrectType&) {
        auto ptr = dynamic_cast<const GenericValue<Eigen::MatrixXd>*>(pointer);
        if (ptr) {
          A = ptr->value();
        } else {
          // Or a dynamic vector
          A = handle_matrix<Eigen::VectorXd, true>()(j, pointer);  // will throw if not....
        }

@ -364,10 +368,10 @@ namespace gtsam {

    if(item != values_.end()) {
      // dynamic cast the type and throw exception if incorrect
      const Value& value = *item->second;
      try {
        return dynamic_cast<const GenericValue<ValueType>&>(value).value();
      } catch (std::bad_cast &) {
      auto ptr = dynamic_cast<const GenericValue<ValueType>*>(item->second);
      if (ptr) {
        return ptr->value();
      } else {
        // NOTE(abe): clang warns about potential side effects if done in typeid
        const Value* value = item->second;
        throw ValuesIncorrectType(j, typeid(*value), typeid(ValueType));
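
The change is the same in every handle: a throwing reference dynamic_cast wrapped in try/catch becomes a pointer dynamic_cast that yields nullptr on mismatch, so the failure path is an ordinary if/else. A minimal standalone illustration of why that works:

    #include <iostream>

    struct Value { virtual ~Value() = default; };
    template <typename T>
    struct GenericValueLike : Value {  // stand-in for GenericValue<T>
      T stored{};
      const T& value() const { return stored; }
    };

    int main() {
      const Value* pointer = new GenericValueLike<int>();
      // Pointer cast: returns nullptr instead of throwing std::bad_cast.
      if (auto ptr = dynamic_cast<const GenericValueLike<double>*>(pointer)) {
        std::cout << ptr->value() << "\n";
      } else {
        std::cout << "wrong type; handled without exception machinery\n";
      }
      delete pointer;
    }
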
@ -226,6 +226,10 @@ class Values {
  void insert(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
  void insert(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Fisheye>& camera);
  void insert(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Unified>& camera);
  void insert(size_t j, const gtsam::PinholePose<gtsam::Cal3_S2>& camera);
  void insert(size_t j, const gtsam::PinholePose<gtsam::Cal3Bundler>& camera);
  void insert(size_t j, const gtsam::PinholePose<gtsam::Cal3Fisheye>& camera);
  void insert(size_t j, const gtsam::PinholePose<gtsam::Cal3Unified>& camera);
  void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
  void insert(size_t j, const gtsam::NavState& nav_state);
  void insert(size_t j, double c);

@ -245,6 +249,10 @@ class Values {
  void insert(size_t j, const gtsam::ParameterMatrix<14>& X);
  void insert(size_t j, const gtsam::ParameterMatrix<15>& X);

  template <T = {gtsam::Point2,
                 gtsam::Point3}>
  void insert(size_t j, const T& val);

  void update(size_t j, const gtsam::Point2& point2);
  void update(size_t j, const gtsam::Point3& point3);
  void update(size_t j, const gtsam::Rot2& rot2);

@ -265,6 +273,10 @@ class Values {
  void update(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
  void update(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Fisheye>& camera);
  void update(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Unified>& camera);
  void update(size_t j, const gtsam::PinholePose<gtsam::Cal3_S2>& camera);
  void update(size_t j, const gtsam::PinholePose<gtsam::Cal3Bundler>& camera);
  void update(size_t j, const gtsam::PinholePose<gtsam::Cal3Fisheye>& camera);
  void update(size_t j, const gtsam::PinholePose<gtsam::Cal3Unified>& camera);
  void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
  void update(size_t j, const gtsam::NavState& nav_state);
  void update(size_t j, Vector vector);

@ -306,6 +318,10 @@ class Values {
  void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Fisheye>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholeCamera<gtsam::Cal3Unified>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholePose<gtsam::Cal3_S2>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholePose<gtsam::Cal3Bundler>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholePose<gtsam::Cal3Fisheye>& camera);
  void insert_or_assign(size_t j, const gtsam::PinholePose<gtsam::Cal3Unified>& camera);
  void insert_or_assign(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
  void insert_or_assign(size_t j, const gtsam::NavState& nav_state);
  void insert_or_assign(size_t j, Vector vector);

@ -347,6 +363,10 @@ class Values {
             gtsam::PinholeCamera<gtsam::Cal3Bundler>,
             gtsam::PinholeCamera<gtsam::Cal3Fisheye>,
             gtsam::PinholeCamera<gtsam::Cal3Unified>,
             gtsam::PinholePose<gtsam::Cal3_S2>,
             gtsam::PinholePose<gtsam::Cal3Bundler>,
             gtsam::PinholePose<gtsam::Cal3Fisheye>,
             gtsam::PinholePose<gtsam::Cal3Unified>,
             gtsam::imuBias::ConstantBias,
             gtsam::NavState,
             Vector,

@ -464,6 +484,9 @@ virtual class NonlinearOptimizerParams {
  bool isSequential() const;
  bool isCholmod() const;
  bool isIterative() const;

  // This only applies to python since matlab does not have lambda machinery.
  gtsam::NonlinearOptimizerParams::IterationHook iterationHook;
};

bool checkConvergence(double relativeErrorTreshold,
@ -77,10 +77,14 @@ struct GTSAM_EXPORT SfmData {
  size_t numberCameras() const { return cameras.size(); }

  /// The track formed by series of landmark measurements
  SfmTrack track(size_t idx) const { return tracks[idx]; }
  const SfmTrack& track(size_t idx) const { return tracks[idx]; }

  /// The camera pose at frame index `idx`
  SfmCamera camera(size_t idx) const { return cameras[idx]; }
  const SfmCamera& camera(size_t idx) const { return cameras[idx]; }

  /// Getters
  const std::vector<SfmCamera>& cameraList() const { return cameras; }
  const std::vector<SfmTrack>& trackList() const { return tracks; }

  /**
   * @brief Create projection factors using keys i and P(j)
@ -21,13 +21,16 @@
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/ExpressionFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/TranslationFactor.h>
#include <gtsam/sfm/TranslationRecovery.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/expressions.h>

#include <set>
#include <utility>

@ -38,16 +41,13 @@ using namespace std;
// In Wrappers we have no access to this so have a default ready.
static std::mt19937 kRandomNumberGenerator(42);

TranslationRecovery::TranslationRecovery(
    const TranslationRecovery::TranslationEdges &relativeTranslations,
    const LevenbergMarquardtParams &lmParams)
    : params_(lmParams) {
// Some relative translations may be zero. We treat nodes that have a zero
// relativeTranslation as a single node.

// A DSFMap is used to find sets of nodes that have a zero relative
// translation. Add the nodes in each edge to the DSFMap, and merge nodes that
// are connected by a zero relative translation.
DSFMap<Key> getSameTranslationDSFMap(
    const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) {
  DSFMap<Key> sameTranslationDSF;
  for (const auto &edge : relativeTranslations) {
    Key key1 = sameTranslationDSF.find(edge.key1());

@ -56,94 +56,152 @@ TranslationRecovery::TranslationRecovery(
      sameTranslationDSF.merge(key1, key2);
    }
  }
  // Use only those edges for which two keys have a distinct root in the DSFMap.
  for (const auto &edge : relativeTranslations) {
    Key key1 = sameTranslationDSF.find(edge.key1());
    Key key2 = sameTranslationDSF.find(edge.key2());
    if (key1 == key2) continue;
    relativeTranslations_.emplace_back(key1, key2, edge.measured(),
                                       edge.noiseModel());
  }
  // Store the DSF map for post-processing results.
  sameTranslationNodes_ = sameTranslationDSF.sets();
  return sameTranslationDSF;
}

NonlinearFactorGraph TranslationRecovery::buildGraph() const {
// Removes zero-translation edges from measurements, and combines the nodes in
// these edges into a single node.
template <typename T>
std::vector<BinaryMeasurement<T>> removeSameTranslationNodes(
    const std::vector<BinaryMeasurement<T>> &edges,
    const DSFMap<Key> &sameTranslationDSFMap) {
  std::vector<BinaryMeasurement<T>> newEdges;
  for (const auto &edge : edges) {
    Key key1 = sameTranslationDSFMap.find(edge.key1());
    Key key2 = sameTranslationDSFMap.find(edge.key2());
    if (key1 == key2) continue;
    newEdges.emplace_back(key1, key2, edge.measured(), edge.noiseModel());
  }
  return newEdges;
}

// Adds nodes that were not optimized for because they were connected
// to another node with a zero-translation edge in the input.
Values addSameTranslationNodes(const Values &result,
                               const DSFMap<Key> &sameTranslationDSFMap) {
  Values final_result = result;
  // Nodes that were not optimized are stored in sameTranslationNodes_ as a map
  // from a key that was optimized to keys that were not optimized. Iterate over
  // map and add results for keys not optimized.
  for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) {
    Key optimizedKey = optimizedAndDuplicateKeys.first;
    std::set<Key> duplicateKeys = optimizedAndDuplicateKeys.second;
    // Add the result for the duplicate key if it does not already exist.
    for (const Key duplicateKey : duplicateKeys) {
      if (final_result.exists(duplicateKey)) continue;
      final_result.insert<Point3>(duplicateKey,
                                  final_result.at<Point3>(optimizedKey));
    }
  }
  return final_result;
}

NonlinearFactorGraph TranslationRecovery::buildGraph(
    const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) const {
  NonlinearFactorGraph graph;

  // Add all relative translation edges
  for (auto edge : relativeTranslations_) {
  // Add translation factors for input translation directions.
  for (auto edge : relativeTranslations) {
    graph.emplace_shared<TranslationFactor>(edge.key1(), edge.key2(),
                                            edge.measured(), edge.noiseModel());
  }

  return graph;
}

void TranslationRecovery::addPrior(
    const double scale, NonlinearFactorGraph *graph,
    const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
    const double scale,
    const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
    NonlinearFactorGraph *graph,
    const SharedNoiseModel &priorNoiseModel) const {
  auto edge = relativeTranslations_.begin();
  if (edge == relativeTranslations_.end()) return;
  auto edge = relativeTranslations.begin();
  if (edge == relativeTranslations.end()) return;
  graph->emplace_shared<PriorFactor<Point3>>(edge->key1(), Point3(0, 0, 0),
                                             priorNoiseModel);

  // Add between factors for optional relative translations.
  for (auto edge : betweenTranslations) {
    graph->emplace_shared<BetweenFactor<Point3>>(
        edge.key1(), edge.key2(), edge.measured(), edge.noiseModel());
  }

  // Add a scale prior only if no other between factors were added.
  if (betweenTranslations.empty()) {
    graph->emplace_shared<PriorFactor<Point3>>(
        edge->key2(), scale * edge->measured().point3(), edge->noiseModel());
  }
}

Values TranslationRecovery::initializeRandomly(std::mt19937 *rng) const {
Values TranslationRecovery::initializeRandomly(
    const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
    std::mt19937 *rng, const Values &initialValues) const {
  uniform_real_distribution<double> randomVal(-1, 1);
  // Create a lambda expression that checks whether value exists and randomly
  // initializes if not.
  Values initial;
  auto insert = [&](Key j) {
    if (!initial.exists(j)) {
    if (initial.exists(j)) return;
    if (initialValues.exists(j)) {
      initial.insert<Point3>(j, initialValues.at<Point3>(j));
    } else {
      initial.insert<Point3>(
          j, Point3(randomVal(*rng), randomVal(*rng), randomVal(*rng)));
    }
    // Assumes all nodes connected by zero-edges have the same initialization.
  };

  // Loop over measurements and add a random translation
  for (auto edge : relativeTranslations_) {
  for (auto edge : relativeTranslations) {
    insert(edge.key1());
    insert(edge.key2());
  }

  // If there are no valid edges, but zero-distance edges exist, initialize one
  // of the nodes in a connected component of zero-distance edges.
  if (initial.empty() && !sameTranslationNodes_.empty()) {
    for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) {
      Key optimizedKey = optimizedAndDuplicateKeys.first;
      initial.insert<Point3>(optimizedKey, Point3(0, 0, 0));
    }
  }
  return initial;
}

Values TranslationRecovery::initializeRandomly() const {
  return initializeRandomly(&kRandomNumberGenerator);
Values TranslationRecovery::initializeRandomly(
    const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
    const Values &initialValues) const {
  return initializeRandomly(relativeTranslations, &kRandomNumberGenerator,
                            initialValues);
}

Values TranslationRecovery::run(const double scale) const {
  auto graph = buildGraph();
  addPrior(scale, &graph);
  const Values initial = initializeRandomly();
  LevenbergMarquardtOptimizer lm(graph, initial, params_);
  Values result = lm.optimize();
Values TranslationRecovery::run(
    const TranslationEdges &relativeTranslations, const double scale,
    const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
    const Values &initialValues) const {
  // Find edges that have a zero-translation, and recompute relativeTranslations
  // and betweenTranslations by retaining only one node for every zero-edge.
  DSFMap<Key> sameTranslationDSFMap =
      getSameTranslationDSFMap(relativeTranslations);
  const TranslationEdges nonzeroRelativeTranslations =
      removeSameTranslationNodes(relativeTranslations, sameTranslationDSFMap);
  const std::vector<BinaryMeasurement<Point3>> nonzeroBetweenTranslations =
      removeSameTranslationNodes(betweenTranslations, sameTranslationDSFMap);

  // Nodes that were not optimized are stored in sameTranslationNodes_ as a map
  // from a key that was optimized to keys that were not optimized. Iterate over
  // map and add results for keys not optimized.
  for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) {
  // Create graph of translation factors.
  NonlinearFactorGraph graph = buildGraph(nonzeroRelativeTranslations);

  // Add global frame prior and scale (either from betweenTranslations or
  // scale).
  addPrior(nonzeroRelativeTranslations, scale, nonzeroBetweenTranslations,
           &graph);

  // Uses initial values from params if provided.
  Values initial =
      initializeRandomly(nonzeroRelativeTranslations, initialValues);

  // If there are no valid edges, but zero-distance edges exist, initialize one
  // of the nodes in a connected component of zero-distance edges.
  if (initial.empty() && !sameTranslationDSFMap.sets().empty()) {
    for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) {
      Key optimizedKey = optimizedAndDuplicateKeys.first;
      std::set<Key> duplicateKeys = optimizedAndDuplicateKeys.second;
      // Add the result for the duplicate key if it does not already exist.
      for (const Key duplicateKey : duplicateKeys) {
        if (result.exists(duplicateKey)) continue;
        result.insert<Point3>(duplicateKey, result.at<Point3>(optimizedKey));
      initial.insert<Point3>(optimizedKey, Point3(0, 0, 0));
    }
  }
  return result;

  LevenbergMarquardtOptimizer lm(graph, initial, lmParams_);
  Values result = lm.optimize();
  return addSameTranslationNodes(result, sameTranslationDSFMap);
}

TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements(
@ -11,7 +11,7 @@

/**
 * @file TranslationRecovery.h
 * @author Frank Dellaert
 * @author Frank Dellaert, Akshay Krishnan
 * @date March 2020
 * @brief Recovering translations in an epipolar graph when rotations are given.
 */

@ -57,68 +57,99 @@ class TranslationRecovery {
  // Translation directions between camera pairs.
  TranslationEdges relativeTranslations_;

  // Parameters used by the LM Optimizer.
  LevenbergMarquardtParams params_;

  // Map from a key in the graph to a set of keys that share the same
  // translation.
  std::map<Key, std::set<Key>> sameTranslationNodes_;
  // Parameters.
  LevenbergMarquardtParams lmParams_;

 public:
  /**
   * @brief Construct a new Translation Recovery object
   *
   * @param relativeTranslations the relative translations, in world coordinate
   * frames, vector of BinaryMeasurements of Unit3, where each key of a
   * measurement is a point in 3D.
   * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be
   * used to modify the parameters for the LM optimizer. By default, uses the
   * default LM parameters.
   * @param lmParams parameters for optimization.
   */
  TranslationRecovery(
      const TranslationEdges &relativeTranslations,
      const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams());
  TranslationRecovery(const LevenbergMarquardtParams &lmParams)
      : lmParams_(lmParams) {}

  /**
   * @brief Default constructor.
   */
  TranslationRecovery() = default;

  /**
   * @brief Build the factor graph to do the optimization.
   *
   * @param relativeTranslations unit translation directions between
   * translations to be estimated
   * @return NonlinearFactorGraph
   */
  NonlinearFactorGraph buildGraph() const;
  NonlinearFactorGraph buildGraph(
      const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) const;

  /**
   * @brief Add priors on ednpoints of first measurement edge.
   * @brief Add 3 factors to the graph:
   * - A prior on the first point to lie at (0, 0, 0)
   * - If betweenTranslations is non-empty, between factors provided by it.
   * - If betweenTranslations is empty, a prior on scale of the first
   * relativeTranslations edge.
   *
   * @param relativeTranslations unit translation directions between
   * translations to be estimated
   * @param scale scale for first relative translation which fixes gauge.
   * @param graph factor graph to which prior is added.
   * @param betweenTranslations relative translations (with scale) between 2
   * points in world coordinate frame known a priori.
   * @param priorNoiseModel the noise model to use with the prior.
   */
  void addPrior(const double scale, NonlinearFactorGraph *graph,
  void addPrior(
      const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
      const double scale,
      const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
      NonlinearFactorGraph *graph,
      const SharedNoiseModel &priorNoiseModel =
          noiseModel::Isotropic::Sigma(3, 0.01)) const;

  /**
   * @brief Create random initial translations.
   *
   * @param relativeTranslations unit translation directions between
   * translations to be estimated
   * @param rng random number generator
   * @param intialValues (optional) initial values from a prior
   * @return Values
   */
  Values initializeRandomly(std::mt19937 *rng) const;
  Values initializeRandomly(
      const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
      std::mt19937 *rng, const Values &initialValues = Values()) const;

  /**
   * @brief Version of initializeRandomly with a fixed seed.
   *
   * @param relativeTranslations unit translation directions between
   * translations to be estimated
   * @param initialValues (optional) initial values from a prior
   * @return Values
   */
  Values initializeRandomly() const;
  Values initializeRandomly(
      const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
      const Values &initialValues = Values()) const;

  /**
   * @brief Build and optimize factor graph.
   *
   * @param relativeTranslations the relative translations, in world coordinate
   * frames, vector of BinaryMeasurements of Unit3, where each key of a
   * measurement is a point in 3D.
   * @param scale scale for first relative translation which fixes gauge.
   * The scale is only used if betweenTranslations is empty.
   * @param betweenTranslations relative translations (with scale) between 2
   * points in world coordinate frame known a priori.
   * @param initialValues intial values for optimization. Initializes randomly
   * if not provided.
   * @return Values
   */
  Values run(const double scale = 1.0) const;
  Values run(
      const TranslationEdges &relativeTranslations, const double scale = 1.0,
      const std::vector<BinaryMeasurement<Point3>> &betweenTranslations = {},
      const Values &initialValues = Values()) const;

  /**
   * @brief Simulate translation direction measurements
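
Putting the refactored stateless interface together, a hedged usage sketch (the keys and the single measurement are invented for illustration; the signatures follow the declarations above):

    #include <gtsam/geometry/Unit3.h>
    #include <gtsam/linear/NoiseModel.h>
    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/sfm/TranslationRecovery.h>

    #include <vector>

    using namespace gtsam;

    int main() {
      // One unit direction from node 0 to node 1, measured in the world frame.
      std::vector<BinaryMeasurement<Unit3>> relativeTranslations;
      relativeTranslations.emplace_back(0, 1, Unit3(1, 0, 0),
                                        noiseModel::Isotropic::Sigma(3, 0.01));

      // Construction no longer takes measurements; they go to run() instead.
      TranslationRecovery recovery;
      Values result = recovery.run(relativeTranslations, /*scale=*/2.0);
      result.print("recovered translations");
    }

Moving the measurements from the constructor to run() is what makes the object reusable across problems and keeps the wrapper bindings below free of per-instance state.
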
@ -4,12 +4,16 @@

namespace gtsam {

#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/SfmTrack.h>
class SfmTrack {
  SfmTrack();
  SfmTrack(const gtsam::Point3& pt);
  const Point3& point3() const;

  Point3 p;

  double r;
  double g;
  double b;

@ -34,12 +38,15 @@ class SfmData {
  static gtsam::SfmData FromBundlerFile(string filename);
  static gtsam::SfmData FromBalFile(string filename);

  std::vector<gtsam::SfmTrack>& trackList() const;
  std::vector<gtsam::PinholeCamera<gtsam::Cal3Bundler>>& cameraList() const;

  void addTrack(const gtsam::SfmTrack& t);
  void addCamera(const gtsam::SfmCamera& cam);
  size_t numberTracks() const;
  size_t numberCameras() const;
  gtsam::SfmTrack track(size_t idx) const;
  gtsam::PinholeCamera<gtsam::Cal3Bundler> camera(size_t idx) const;
  gtsam::SfmTrack& track(size_t idx) const;
  gtsam::PinholeCamera<gtsam::Cal3Bundler>& camera(size_t idx) const;

  gtsam::NonlinearFactorGraph generalSfmFactors(
      const gtsam::SharedNoiseModel& model =

@ -83,6 +90,7 @@ class BinaryMeasurement {

typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3;
typedef gtsam::BinaryMeasurement<gtsam::Point3> BinaryMeasurementPoint3;

class BinaryMeasurementsUnit3 {
  BinaryMeasurementsUnit3();

@ -91,6 +99,20 @@ class BinaryMeasurementsUnit3 {
  void push_back(const gtsam::BinaryMeasurement<gtsam::Unit3>& measurement);
};

class BinaryMeasurementsPoint3 {
  BinaryMeasurementsPoint3();
  size_t size() const;
  gtsam::BinaryMeasurement<gtsam::Point3> at(size_t idx) const;
  void push_back(const gtsam::BinaryMeasurement<gtsam::Point3>& measurement);
};

class BinaryMeasurementsRot3 {
  BinaryMeasurementsRot3();
  size_t size() const;
  gtsam::BinaryMeasurement<gtsam::Rot3> at(size_t idx) const;
  void push_back(const gtsam::BinaryMeasurement<gtsam::Rot3>& measurement);
};

#include <gtsam/sfm/ShonanAveraging.h>

// TODO(frank): copy/pasta below until we have integer template paremeters in

@ -184,6 +206,10 @@ class ShonanAveraging2 {
};

class ShonanAveraging3 {
  ShonanAveraging3(
      const std::vector<gtsam::BinaryMeasurement<gtsam::Rot3>>& measurements,
      const gtsam::ShonanAveragingParameters3& parameters =
          gtsam::ShonanAveragingParameters3());
  ShonanAveraging3(string g2oFile);
  ShonanAveraging3(string g2oFile,
                   const gtsam::ShonanAveragingParameters3& parameters);

@ -252,15 +278,36 @@ class MFAS {
};

#include <gtsam/sfm/TranslationRecovery.h>

class TranslationRecovery {
  TranslationRecovery(
  TranslationRecovery(const gtsam::LevenbergMarquardtParams& lmParams);
  TranslationRecovery();  // default params.
  void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
                const double scale,
                const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
                gtsam::NonlinearFactorGraph @graph,
                const gtsam::SharedNoiseModel& priorNoiseModel) const;
  void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
                const double scale,
                const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
                gtsam::NonlinearFactorGraph @graph) const;
  gtsam::NonlinearFactorGraph buildGraph(
      const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const;
  gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
                    const double scale,
                    const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
                    const gtsam::Values& initialValues) const;
  // default random initial values
  gtsam::Values run(
      const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
      const gtsam::LevenbergMarquardtParams& lmParams);
  TranslationRecovery(
      const gtsam::BinaryMeasurementsUnit3&
          relativeTranslations);  // default LevenbergMarquardtParams
  gtsam::Values run(const double scale) const;
  gtsam::Values run() const;  // default scale = 1.0
      const double scale,
      const gtsam::BinaryMeasurementsPoint3& betweenTranslations) const;
  // default empty betweenTranslations
  gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
                    const double scale) const;
  // default scale = 1.0, empty betweenTranslations
  gtsam::Values run(
      const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const;
};

} // namespace gtsam
@ -40,8 +40,7 @@ T FindKarcherMeanImpl(const vector<T, ALLOC>& rotations) {
  return result.at<T>(kKey);
}

template <class T,
          typename = typename std::enable_if< std::is_same<gtsam::Rot3, T>::value >::type >
template <class T>
T FindKarcherMean(const std::vector<T>& rotations) {
  return FindKarcherMeanImpl(rotations);
}
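
With the enable_if constraint dropped, FindKarcherMean is instantiable for any type whose traits provide the needed manifold operations, not only Rot3. A hedged sketch (the header name is assumed from the hunk context; Rot3 keeps the sketch concrete):

    #include <gtsam/geometry/Rot3.h>
    #include <gtsam/slam/KarcherMeanFactor-inl.h>

    #include <vector>

    int main() {
      std::vector<gtsam::Rot3> rotations;
      rotations.push_back(gtsam::Rot3::Yaw(0.1));
      rotations.push_back(gtsam::Rot3::Yaw(-0.1));
      // Iterative mean on the manifold; for these inputs it is near identity.
      gtsam::Rot3 mean = gtsam::FindKarcherMean(rotations);
      mean.print("Karcher mean");
    }
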
@ -223,6 +223,8 @@ parse3DFactors(const std::string &filename,
               size_t maxIndex = 0);

using BinaryMeasurementsUnit3 = std::vector<BinaryMeasurement<Unit3>>;
using BinaryMeasurementsPoint3 = std::vector<BinaryMeasurement<Point3>>;
using BinaryMeasurementsRot3 = std::vector<BinaryMeasurement<Rot3>>;

#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
inline boost::optional<IndexedPose> GTSAM_DEPRECATED
@ -90,6 +90,22 @@ typedef gtsam::GeneralSFMFactor<gtsam::PinholeCamera<gtsam::Cal3Unified>,
                                gtsam::Point3>
    GeneralSFMFactorCal3Unified;

typedef gtsam::GeneralSFMFactor<gtsam::PinholePose<gtsam::Cal3_S2>,
                                gtsam::Point3>
    GeneralSFMFactorPoseCal3_S2;
typedef gtsam::GeneralSFMFactor<gtsam::PinholePose<gtsam::Cal3DS2>,
                                gtsam::Point3>
    GeneralSFMFactorPoseCal3DS2;
typedef gtsam::GeneralSFMFactor<gtsam::PinholePose<gtsam::Cal3Bundler>,
                                gtsam::Point3>
    GeneralSFMFactorPoseCal3Bundler;
typedef gtsam::GeneralSFMFactor<gtsam::PinholePose<gtsam::Cal3Fisheye>,
                                gtsam::Point3>
    GeneralSFMFactorPoseCal3Fisheye;
typedef gtsam::GeneralSFMFactor<gtsam::PinholePose<gtsam::Cal3Unified>,
                                gtsam::Point3>
    GeneralSFMFactorPoseCal3Unified;

template <CALIBRATION = {gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler,
                         gtsam::Cal3Fisheye, gtsam::Cal3Unified}>
virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor {
@ -5,12 +5,16 @@
 * @date Nov 4, 2014
 */

#include <gtsam/base/Vector.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Vector.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/nonlinear/factorTesting.h>

using namespace std;
using namespace std::placeholders;
using namespace gtsam;
using namespace imuBias;

/* ************************************************************************* */

@ -28,11 +32,39 @@ TEST(PriorFactor, ConstructorVector3) {

// Constructor dynamic sized vector
TEST(PriorFactor, ConstructorDynamicSizeVector) {
  Vector v(5); v << 1, 2, 3, 4, 5;
  Vector v(5);
  v << 1, 2, 3, 4, 5;
  SharedNoiseModel model = noiseModel::Isotropic::Sigma(5, 1.0);
  PriorFactor<Vector> factor(1, v, model);
}

Vector callEvaluateError(const PriorFactor<ConstantBias>& factor,
                         const ConstantBias& bias) {
  return factor.evaluateError(bias);
}

// Test for imuBias::ConstantBias
TEST(PriorFactor, ConstantBias) {
  Vector3 biasAcc(1, 2, 3);
  Vector3 biasGyro(0.1, 0.2, 0.3);
  ConstantBias bias(biasAcc, biasGyro);

  PriorFactor<ConstantBias> factor(1, bias,
                                   noiseModel::Isotropic::Sigma(6, 0.1));
  Values values;
  values.insert(1, bias);

  EXPECT_DOUBLES_EQUAL(0.0, factor.error(values), 1e-8);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);

  ConstantBias incorrectBias(
      (Vector6() << 1.1, 2.1, 3.1, 0.2, 0.3, 0.4).finished());
  values.clear();
  values.insert(1, incorrectBias);
  EXPECT_DOUBLES_EQUAL(3.0, factor.error(values), 1e-8);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
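
A quick check of the expected value in the new test: the incorrect bias is off by 0.1 in each of six components, the isotropic sigma is 0.1, and a Gaussian factor's error is half the squared whitened residual:

    #include <cstdio>

    int main() {
      const double delta = 0.1, sigma = 0.1;
      double error = 0.0;
      for (int i = 0; i < 6; ++i)
        error += 0.5 * (delta / sigma) * (delta / sigma);
      std::printf("expected factor error = %.1f\n", error);  // 3.0, as asserted
    }
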
@ -104,16 +104,16 @@ TEST(SymbolicBayesNet, Dot) {
      "digraph {\n"
      "  size=\"5,5\";\n"
      "\n"
      " vara1[label=\"a1\", pos=\"1,2!\", shape=box];\n"
      " vara2[label=\"a2\", pos=\"2,2!\", shape=box];\n"
      " varx1[label=\"x1\", pos=\"1,1!\"];\n"
      " varx2[label=\"x2\", pos=\"2,1!\"];\n"
      " varx3[label=\"x3\", pos=\"3,1!\"];\n"
      " var6989586621679009793[label=\"a1\", pos=\"1,2!\", shape=box];\n"
      " var6989586621679009794[label=\"a2\", pos=\"2,2!\", shape=box];\n"
      " var8646911284551352321[label=\"x1\", pos=\"1,1!\"];\n"
      " var8646911284551352322[label=\"x2\", pos=\"2,1!\"];\n"
      " var8646911284551352323[label=\"x3\", pos=\"3,1!\"];\n"
      "\n"
      " varx1->varx2\n"
      " vara1->varx2\n"
      " varx2->varx3\n"
      " vara2->varx3\n"
      " var8646911284551352321->var8646911284551352322\n"
      " var6989586621679009793->var8646911284551352322\n"
      " var8646911284551352322->var8646911284551352323\n"
      " var6989586621679009794->var8646911284551352323\n"
      "}");
}

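
The opaque node names in the updated expectation are simply gtsam::Symbol keys printed as raw 64-bit integers: the character lives in the top byte and the index in the remainder, so Symbol('a', 1) is 97·2⁵⁶ + 1 = 6989586621679009793. A quick verification:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const std::uint64_t a1 = (std::uint64_t('a') << 56) | 1;  // Symbol('a', 1)
      const std::uint64_t x2 = (std::uint64_t('x') << 56) | 2;  // Symbol('x', 2)
      std::printf("a1 = %llu\nx2 = %llu\n",
                  static_cast<unsigned long long>(a1),
                  static_cast<unsigned long long>(x2));
      // Prints 6989586621679009793 and 8646911284551352322, matching the test.
    }
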
@ -797,4 +797,30 @@ virtual class ProjectionFactorPPPC : gtsam::NoiseModelFactor {
typedef gtsam::ProjectionFactorPPPC<gtsam::Pose3, gtsam::Point3, gtsam::Cal3_S2> ProjectionFactorPPPCCal3_S2;
typedef gtsam::ProjectionFactorPPPC<gtsam::Pose3, gtsam::Point3, gtsam::Cal3DS2> ProjectionFactorPPPCCal3DS2;

#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>
virtual class ProjectionFactorRollingShutter : gtsam::NoiseModelFactor {
  ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
      size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K);

  ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
      size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, gtsam::Pose3& body_P_sensor);

  ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
      size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, bool throwCheirality,
      bool verboseCheirality);

  ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
      size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, bool throwCheirality,
      bool verboseCheirality, gtsam::Pose3& body_P_sensor);

  gtsam::Point2 measured() const;
  double alpha() const;
  gtsam::Cal3_S2* calibration() const;
  bool verboseCheirality() const;
  bool throwCheirality() const;

  // enabling serialization functionality
  void serialize() const;
};

} //\namespace gtsam
@ -92,7 +92,7 @@ public:
    } catch( CheiralityException& e) {
      if (H1) *H1 = Matrix::Zero(2,6);
      if (H2) *H2 = Matrix::Zero(2,5);
      if (H3) *H2 = Matrix::Zero(2,1);
      if (H3) *H3 = Matrix::Zero(2,1);
      std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->key2()) <<
          " moved behind camera " << DefaultKeyFormatter(this->key1()) << std::endl;
      return Vector::Ones(2) * 2.0 * K_->fx();
@ -1,8 +1,18 @@
/*
 * testInvDepthFactor.cpp
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file testInvDepthFactor3.cpp
 * @brief Unit tests inverse depth parametrization
 *
 * Created on: Apr 13, 2012
 * Author: cbeall3
 * @author cbeall3
 * @author Dominik Van Opdenbosch
 * @date Apr 13, 2012
 */

#include <CppUnitLite/TestHarness.h>

@ -12,6 +22,7 @@
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/numericalDerivative.h>

#include <gtsam_unstable/slam/InvDepthFactor3.h>

@ -28,6 +39,11 @@ PinholeCamera<Cal3_S2> level_camera(level_pose, *K);
typedef InvDepthFactor3<Pose3, Vector5, double> InverseDepthFactor;
typedef NonlinearEquality<Pose3> PoseConstraint;

Matrix factorError(const Pose3& pose, const Vector5& point, double invDepth,
                   const InverseDepthFactor& factor) {
  return factor.evaluateError(pose, point, invDepth);
}

/* ************************************************************************* */
TEST( InvDepthFactor, optimize) {

@ -92,6 +108,55 @@ TEST( InvDepthFactor, optimize) {
}


/* ************************************************************************* */
TEST( InvDepthFactor, Jacobian3D ) {

  // landmark 5 meters infront of camera (camera center at (0,0,1))
  Point3 landmark(5, 0, 1);

  // get expected projection using pinhole camera
  Point2 expected_uv = level_camera.project(landmark);

  // get expected landmark representation using backprojection
  double inv_depth;
  Vector5 inv_landmark;
  InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
  std::tie(inv_landmark, inv_depth) = inv_camera.backproject(expected_uv, 5);
  Vector5 expected_inv_landmark((Vector(5) << 0., 0., 1., 0., 0.).finished());

  CHECK(assert_equal(expected_inv_landmark, inv_landmark, 1e-6));
  CHECK(assert_equal(inv_depth, 1./5, 1e-6));

  Symbol poseKey('x',1);
  Symbol pointKey('l',1);
  Symbol invDepthKey('d',1);
  InverseDepthFactor factor(expected_uv, sigma, poseKey, pointKey, invDepthKey, K);

  std::vector<Matrix> actualHs(3);
  factor.unwhitenedError({{poseKey, genericValue(level_pose)},
                          {pointKey, genericValue(inv_landmark)},
                          {invDepthKey, genericValue(inv_depth)}},
                         actualHs);

  const Matrix& H1Actual = actualHs.at(0);
  const Matrix& H2Actual = actualHs.at(1);
  const Matrix& H3Actual = actualHs.at(2);

  // Use numerical derivatives to verify the Jacobians
  Matrix H1Expected, H2Expected, H3Expected;

  std::function<Matrix(const Pose3 &, const Vector5 &, const double &)>
      func = std::bind(&factorError, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, factor);
  H1Expected = numericalDerivative31(func, level_pose, inv_landmark, inv_depth);
  H2Expected = numericalDerivative32(func, level_pose, inv_landmark, inv_depth);
  H3Expected = numericalDerivative33(func, level_pose, inv_landmark, inv_depth);

  // Verify the Jacobians
  CHECK(assert_equal(H1Expected, H1Actual, 1e-6))
  CHECK(assert_equal(H2Expected, H2Actual, 1e-6))
  CHECK(assert_equal(H3Expected, H3Actual, 1e-6))
}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */
@ -47,7 +47,9 @@ set(ignore
    gtsam::Pose3Vector
    gtsam::Rot3Vector
    gtsam::KeyVector
    gtsam::BinaryMeasurementsPoint3
    gtsam::BinaryMeasurementsUnit3
    gtsam::BinaryMeasurementsRot3
    gtsam::DiscreteKey
    gtsam::KeyPairDoubleMap)

@ -98,11 +100,23 @@ set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)
copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
               "${GTSAM_MODULE_PATH}")

# Hack to get python test files copied every time they are modified
# Hack to get python test and util files copied every time they are modified
file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py")
foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
  configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY)
endforeach()
file(GLOB GTSAM_PYTHON_UTIL_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/utils/*.py")
foreach(util_file ${GTSAM_PYTHON_UTIL_FILES})
  configure_file(${util_file} "${GTSAM_MODULE_PATH}/utils/${test_file}" COPYONLY)
endforeach()
file(GLOB GTSAM_PYTHON_PREAMBLE_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/preamble/*.h")
foreach(util_file ${GTSAM_PYTHON_PREAMBLE_FILES})
  configure_file(${util_file} "${GTSAM_MODULE_PATH}/preamble/${test_file}" COPYONLY)
endforeach()
file(GLOB GTSAM_PYTHON_SPECIALIZATION_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/specializations/*.h")
foreach(util_file ${GTSAM_PYTHON_SPECIALIZATION_FILES})
  configure_file(${util_file} "${GTSAM_MODULE_PATH}/specializations/${test_file}" COPYONLY)
endforeach()

# Common directory for data/datasets stored with the package.
# This will store the data in the Python site package directly.

@ -124,7 +138,9 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
    gtsam::Pose3Vector
    gtsam::KeyVector
    gtsam::FixedLagSmootherKeyTimestampMapValue
    gtsam::BinaryMeasurementsPoint3
    gtsam::BinaryMeasurementsUnit3
    gtsam::BinaryMeasurementsRot3
    gtsam::CameraSetCal3_S2
    gtsam::CameraSetCal3Bundler
    gtsam::CameraSetCal3Unified

@ -160,7 +176,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)

  # Hack to get python test files copied every time they are modified
  file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py")
  foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
  foreach(test_file ${GTSAM_UNSTABLE_PYTHON_TEST_FILES})
    configure_file(${test_file} "${GTSAM_UNSTABLE_MODULE_PATH}/tests/${test_file}" COPYONLY)
  endforeach()

@ -172,7 +188,7 @@ endif()
# Add custom target so we can install with `make python-install`
set(GTSAM_PYTHON_INSTALL_TARGET python-install)
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
        COMMAND ${PYTHON_EXECUTABLE} -m pip install --user .
        COMMAND ${PYTHON_EXECUTABLE} -m pip install .
        DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
        WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})
@ -8,6 +8,7 @@ For instructions on updating the version of the [wrap library](https://github.co

## Requirements

- Cmake >= 3.15
- If you want to build the GTSAM python library for a specific python version (eg 3.6),
  use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
@ -123,7 +123,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
    w_iZj_inliers = filter_outliers(w_iZj_list)

    # Run the optimizer to obtain translations for normalized directions.
    wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run()
    wtc_values = gtsam.TranslationRecovery().run(w_iZj_inliers)

    wTc_values = gtsam.Values()
    for key in wRc_values.keys():
@ -0,0 +1,133 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Ellipse Scaling\n",
    "\n",
    "The code to calculate the percentages included in ellipses with various values of \"k\" in `plot.py`.\n",
    "\n",
    "Thanks to @senselessDev, January 26, for providing the code in [PR #1067](https://github.com/borglab/gtsam/pull/1067)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import scipy\n",
    "import scipy.stats\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pct_to_sigma(pct, dof):\n",
    "    return np.sqrt(scipy.stats.chi2.ppf(pct / 100., df=dof))\n",
    "\n",
    "def sigma_to_pct(sigma, dof):\n",
    "    return scipy.stats.chi2.cdf(sigma**2, df=dof) * 100."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0D\t 1 \t 2 \t 3 \t 4 \t 5 \n",
      "1D\t68.26895%\t95.44997%\t99.73002%\t99.99367%\t99.99994%\n",
      "2D\t39.34693%\t86.46647%\t98.88910%\t99.96645%\t99.99963%\n",
      "3D\t19.87480%\t73.85359%\t97.07091%\t99.88660%\t99.99846%\n"
     ]
    }
   ],
   "source": [
    "for dof in range(0, 4):\n",
    "    print(\"{}D\".format(dof), end=\"\")\n",
    "    for sigma in range(1, 6):\n",
    "        if dof == 0: print(\"\\t {} \".format(sigma), end=\"\")\n",
    "        else: print(\"\\t{:.5f}%\".format(sigma_to_pct(sigma, dof)), end=\"\")\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1D\n",
      "\n",
      "pct=50.0 -> sigma=0.674489750196\n",
      "pct=95.0 -> sigma=1.959963984540\n",
      "pct=99.0 -> sigma=2.575829303549\n",
      "\n",
      "2D\n",
      "\n",
      "pct=50.0 -> sigma=1.177410022515\n",
      "pct=95.0 -> sigma=2.447746830681\n",
      "pct=99.0 -> sigma=3.034854258770\n",
      "\n",
      "3D\n",
      "\n",
      "pct=50.0 -> sigma=1.538172254455\n",
      "pct=95.0 -> sigma=2.795483482915\n",
      "pct=99.0 -> sigma=3.368214175219\n",
      "\n"
     ]
    }
   ],
   "source": [
    "for dof in range(1, 4):\n",
    "    print(\"{}D\\n\".format(dof))\n",
    "    for pct in [50, 95, 99]:\n",
    "        print(\"pct={:.1f} -> sigma={:.12f}\".format(pct, pct_to_sigma(pct, dof)))\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "4d608302ba82e7596903db5446e6fa05f049271852e8cc6e1cafaafe5fbd9fed"
  },
  "kernelspec": {
   "display_name": "Python 3.8.13 ('gtsfm-v1')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
|
|
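The notebook above rests on a standard chi-square fact, stated here compactly as a sketch (not part of the committed file): for a dof-dimensional unit Gaussian, the squared Mahalanobis radius is chi-square distributed with dof degrees of freedom, which is exactly what `sigma_to_pct` and `pct_to_sigma` implement.

```latex
% For x ~ N(0, I_dof), ||x||^2 ~ chi^2_dof, so the mass inside radius k is
p(k, \mathrm{dof}) = F_{\chi^2_{\mathrm{dof}}}\!\left(k^2\right),
\qquad
k(p, \mathrm{dof}) = \sqrt{F_{\chi^2_{\mathrm{dof}}}^{-1}(p)}
```

For dof = 1 and k = 1 this gives 68.26895%, matching the first entry of the table printed above.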
@@ -23,8 +23,8 @@ PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2>>);
PYBIND11_MAKE_OPAQUE(gtsam::Point2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Point3Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose3Pairs);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Pose3>);
PYBIND11_MAKE_OPAQUE(
    gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>);
@@ -10,3 +10,17 @@
 * Without this they will be automatically converted to a Python object, and all
 * mutations on Python side will not be reflected on C++.
 */

// Including <stl.h> can cause some serious hard-to-debug bugs!!!
// #include <pybind11/stl.h>
#include <pybind11/stl_bind.h>

PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::SfmTrack>);

PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::SfmCamera>);
PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::BinaryMeasurement<gtsam::Unit3>>);
PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::BinaryMeasurement<gtsam::Rot3>>);
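These opaque declarations are the reason the wrapped STL containers behave like references rather than copies, as the comment above explains. A minimal sketch of the effect from the Python side (the container name is one bound in this PR; usage mirrors the test_Pose2 change further down):

```python
# Minimal sketch: opaque containers wrap the C++ std::vector directly,
# so no per-call copy of the whole vector is made and in-place mutations
# are visible to C++ callers.
import numpy as np
import gtsam

pts_a = [np.array([3., 1.]), np.array([1., 1.])]
pts_b = [np.array([1., -3.]), np.array([1., -5.])]

# Point2Pairs is the py::bind_vector binding of std::vector<gtsam::Point2Pair>;
# a Python tuple converts to the std::pair element type.
ab_pairs = gtsam.Point2Pairs(list(zip(pts_a, pts_b)))
ab_pairs.append((np.array([1., 3.]), np.array([-1., -5.])))  # list-like API
print(len(ab_pairs))  # 3
```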
@@ -16,10 +16,13 @@ py::bind_vector<
    m_, "Point2Vector");
py::bind_vector<std::vector<gtsam::Point2Pair>>(m_, "Point2Pairs");
py::bind_vector<std::vector<gtsam::Point3Pair>>(m_, "Point3Pairs");
py::bind_vector<std::vector<gtsam::Pose2Pair>>(m_, "Pose2Pairs");
py::bind_vector<std::vector<gtsam::Pose3Pair>>(m_, "Pose3Pairs");
py::bind_vector<std::vector<gtsam::Pose3>>(m_, "Pose3Vector");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>>(
    m_, "CameraSetCal3_S2");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3DS2>>>(
    m_, "CameraSetCal3DS2");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>>(
    m_, "CameraSetCal3Bundler");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Unified>>>(
@@ -11,6 +11,23 @@
 * and saves one copy operation.
 */

py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Point3> > >(
    m_, "BinaryMeasurementsPoint3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >(
    m_, "BinaryMeasurementsUnit3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Rot3> > >(
    m_, "BinaryMeasurementsRot3");
py::bind_map<gtsam::KeyPairDoubleMap>(m_, "KeyPairDoubleMap");

py::bind_vector<
    std::vector<gtsam::SfmTrack> >(
    m_, "SfmTracks");

py::bind_vector<
    std::vector<gtsam::SfmCamera> >(
    m_, "SfmCameras");

py::bind_vector<
    std::vector<std::pair<size_t, gtsam::Point2>>>(
    m_, "SfmMeasurementVector"
);
@@ -139,6 +139,17 @@ class TestCal3Unified(GtsamTestCase):
        self.gtsamAssertEquals(z, np.zeros(2))
        self.gtsamAssertEquals(H @ H.T, 4*np.eye(2))

        Dcal = np.zeros((2, 10), order='F')
        Dp = np.zeros((2, 2), order='F')
        camera.calibrate(img_point, Dcal, Dp)

        self.gtsamAssertEquals(Dcal, np.array(
            [[ 0., 0., 0., -1., 0., 0., 0., 0., 0., 0.],
             [ 0., 0., 0., 0., -1., 0., 0., 0., 0., 0.]]))
        self.gtsamAssertEquals(Dp, np.array(
            [[ 1., -0.],
             [-0., 1.]]))

    @unittest.skip("triangulatePoint3 currently seems to require perspective projections.")
    def test_triangulation(self):
        """Estimate spatial point from image measurements"""
@@ -11,12 +11,12 @@ Author: Frank Dellaert

# pylint: disable=no-name-in-module, invalid-name

import unittest
import textwrap
import unittest

import gtsam
from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteFactorGraph,
                   DiscreteKeys, DiscreteDistribution, DiscreteValues, Ordering)
from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteDistribution,
                   DiscreteFactorGraph, DiscreteKeys, DiscreteValues, Ordering)
from gtsam.utils.test_case import GtsamTestCase

# Some keys:
@@ -152,10 +152,10 @@ class TestDiscreteBayesNet(GtsamTestCase):
        var4[label="4"];
        var5[label="5"];
        var6[label="6"];
        vara0[label="a0", pos="0,2!"];
        var6989586621679009792[label="a0", pos="0,2!"];

        var4->var6
        vara0->var3
        var6989586621679009792->var3
        var3->var5
        var6->var5
        }"""
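The expected DOT output changed because variables are now printed by their integer key. The large numbers are not arbitrary: a GTSAM Symbol packs an 8-bit character and a 56-bit index into one 64-bit key, which this arithmetic reproduces (the x1 and l1 keys reappear in the C++ testNonlinearFactorGraph expectations further down):

```python
# Where the big integers in the DOT output come from:
# key = (character << 56) + index, per gtsam::Symbol.
print((ord('a') << 56) + 0)  # 6989586621679009792, label "a0"
print((ord('l') << 56) + 1)  # 7782220156096217089, label "l1"
print((ord('x') << 56) + 1)  # 8646911284551352321, label "x1"
```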
@@ -15,12 +15,10 @@ from __future__ import print_function
import unittest

import gtsam
from gtsam import (DoglegOptimizer, DoglegParams,
                   DummyPreconditionerParameters, GaussNewtonOptimizer,
                   GaussNewtonParams, GncLMParams, GncLMOptimizer,
                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams,
                   NonlinearFactorGraph, Ordering,
                   PCGSolverParameters, Point2, PriorFactorPoint2, Values)
from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters,
                   GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer,
                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph,
                   Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values)
from gtsam.utils.test_case import GtsamTestCase

KEY1 = 1
@@ -28,63 +26,83 @@ KEY2 = 2


class TestScenario(GtsamTestCase):
    def test_optimize(self):
        """Do trivial test with three optimizer variants."""
        fg = NonlinearFactorGraph()

    def setUp(self):
        """Set up the optimization problem and ordering"""
        # create graph
        self.fg = NonlinearFactorGraph()
        model = gtsam.noiseModel.Unit.Create(2)
        fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))
        self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

        # test error at minimum
        xstar = Point2(0, 0)
        optimal_values = Values()
        optimal_values.insert(KEY1, xstar)
        self.assertEqual(0.0, fg.error(optimal_values), 0.0)
        self.optimal_values = Values()
        self.optimal_values.insert(KEY1, xstar)
        self.assertEqual(0.0, self.fg.error(self.optimal_values), 0.0)

        # test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
        x0 = Point2(3, 3)
        initial_values = Values()
        initial_values.insert(KEY1, x0)
        self.assertEqual(9.0, fg.error(initial_values), 1e-3)
        self.initial_values = Values()
        self.initial_values.insert(KEY1, x0)
        self.assertEqual(9.0, self.fg.error(self.initial_values), 1e-3)

        # optimize parameters
        ordering = Ordering()
        ordering.push_back(KEY1)
        self.ordering = Ordering()
        self.ordering.push_back(KEY1)

    # Gauss-Newton
    def test_gauss_newton(self):
        gnParams = GaussNewtonParams()
        gnParams.setOrdering(ordering)
        actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual1))
        gnParams.setOrdering(self.ordering)
        actual = GaussNewtonOptimizer(self.fg, self.initial_values, gnParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    # Levenberg-Marquardt
    def test_levenberg_marquardt(self):
        lmParams = LevenbergMarquardtParams.CeresDefaults()
        lmParams.setOrdering(ordering)
        actual2 = LevenbergMarquardtOptimizer(
            fg, initial_values, lmParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual2))
        lmParams.setOrdering(self.ordering)
        actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    # Levenberg-Marquardt
    def test_levenberg_marquardt_pcg(self):
        lmParams = LevenbergMarquardtParams.CeresDefaults()
        lmParams.setLinearSolverType("ITERATIVE")
        cgParams = PCGSolverParameters()
        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
        lmParams.setIterativeParams(cgParams)
        actual2 = LevenbergMarquardtOptimizer(
            fg, initial_values, lmParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual2))
        actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    # Dogleg
    def test_dogleg(self):
        dlParams = DoglegParams()
        dlParams.setOrdering(ordering)
        actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual3))
        dlParams.setOrdering(self.ordering)
        actual = DoglegOptimizer(self.fg, self.initial_values, dlParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    # Graduated Non-Convexity (GNC)
    def test_graduated_non_convexity(self):
        gncParams = GncLMParams()
        actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual4))

        actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))

    def test_iteration_hook(self):
        # set up iteration hook to track some testable values
        iteration_count = 0
        final_error = 0
        final_values = None
        def iteration_hook(iter, error_before, error_after):
            nonlocal iteration_count, final_error, final_values
            iteration_count = iter
            final_error = error_after
            final_values = optimizer.values()
        # optimize
        params = LevenbergMarquardtParams.CeresDefaults()
        params.setOrdering(self.ordering)
        params.iterationHook = iteration_hook
        optimizer = LevenbergMarquardtOptimizer(self.fg, self.initial_values, params)
        actual = optimizer.optimize()
        self.assertAlmostEqual(0, self.fg.error(actual))
        self.gtsamAssertEquals(final_values, actual)
        self.assertEqual(self.fg.error(actual), final_error)
        self.assertEqual(optimizer.iterations(), iteration_count)


if __name__ == "__main__":
    unittest.main()
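The new `test_iteration_hook` above exercises the `iterationHook` field on the optimizer params. A standalone sketch of the same pattern, assuming an existing NonlinearFactorGraph `graph` and Values `initial`:

```python
# Sketch: record the error trace of an LM run via params.iterationHook.
import gtsam

params = gtsam.LevenbergMarquardtParams.CeresDefaults()
trace = []
# The hook is called after every iteration with (iteration, error_before, error_after).
params.iterationHook = lambda it, before, after: trace.append((it, before, after))

optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
print(trace)  # one tuple per iteration
```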
@@ -0,0 +1,46 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

PinholeCamera unit tests.
Author: Fan Jiang
"""
import unittest
from math import pi

import numpy as np

import gtsam
from gtsam.utils.test_case import GtsamTestCase


class TestPinholeCamera(GtsamTestCase):
    """
    Tests if we can correctly get the camera Jacobians in Python
    """
    def test_jacobian(self):
        cam1 = gtsam.PinholeCameraCal3Bundler()

        # order is important because Eigen is column major!
        Dpose = np.zeros((2, 6), order='F')
        Dpoint = np.zeros((2, 3), order='F')
        Dcal = np.zeros((2, 3), order='F')
        cam1.project(np.array([1, 1, 1]), Dpose, Dpoint, Dcal)

        self.gtsamAssertEquals(Dpoint, np.array([[1, 0, -1], [0, 1, -1]]))

        self.gtsamAssertEquals(
            Dpose,
            np.array([
                [1., -2., 1., -1., 0., 1.],  #
                [2., -1., -1., 0., -1., 1.]
            ]))

        self.gtsamAssertEquals(Dcal, np.array([[1., 2., 4.], [1., 2., 4.]]))


if __name__ == "__main__":
    unittest.main()
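The `order='F'` comment above is the crux of this test: the wrapper writes the Jacobians in place through Eigen, whose default storage is column major, so the preallocated NumPy buffers must be Fortran-ordered. A small sketch of the convention:

```python
# Sketch: output Jacobian arguments must be Fortran-ordered to match
# Eigen's column-major layout; a default C-ordered array does not qualify.
import numpy as np

Dpose = np.zeros((2, 6), order='F')  # suitable as an in-place Jacobian output
bad = np.zeros((2, 6))               # C-ordered, wrong storage order
print(Dpose.flags['F_CONTIGUOUS'], bad.flags['F_CONTIGUOUS'])  # True False
```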
@@ -70,27 +70,36 @@ class TestPose2(GtsamTestCase):
        O---O
        """
        pts_a = [
            Point2(3, 1),
            Point2(1, 1),
            Point2(1, 3),
            Point2(3, 3),
        ]
        pts_b = [
            Point2(1, -3),
            Point2(1, -5),
            Point2(-1, -5),
            Point2(-1, -3),
        ]
        pts_b = [
            Point2(3, 1),
            Point2(1, 1),
            Point2(1, 3),
            Point2(3, 3),
        ]

        # fmt: on
        ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
        bTa = gtsam.align(ab_pairs)
        aTb = bTa.inverse()
        assert aTb is not None
        aTb = Pose2.Align(ab_pairs)
        self.assertIsNotNone(aTb)

        for pt_a, pt_b in zip(pts_a, pts_b):
            pt_a_ = aTb.transformFrom(pt_b)
            assert np.allclose(pt_a, pt_a_)
            np.testing.assert_allclose(pt_a, pt_a_)

        # Matrix version
        A = np.array(pts_a).T
        B = np.array(pts_b).T
        aTb = Pose2.Align(A, B)
        self.assertIsNotNone(aTb)

        for pt_a, pt_b in zip(pts_a, pts_b):
            pt_a_ = aTb.transformFrom(pt_b)
            np.testing.assert_allclose(pt_a, pt_a_)


if __name__ == "__main__":
@@ -0,0 +1,194 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

Sim2 unit tests.
Author: John Lambert
"""
# pylint: disable=no-name-in-module
import unittest

import numpy as np
from gtsam import Pose2, Pose2Pairs, Rot2, Similarity2
from gtsam.utils.test_case import GtsamTestCase


class TestSim2(GtsamTestCase):
    """Test selected Sim2 methods."""

    def test_align_poses_along_straight_line(self) -> None:
        """Test Align Pose2Pairs method.

        Scenario:
            3 object poses
            same scale (no gauge ambiguity)
            world frame has poses rotated by 180 degrees.
            world and egovehicle frame translated by 15 meters w.r.t. each other
        """
        R180 = Rot2.fromDegrees(180)

        # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
        # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
        eTo0 = Pose2(Rot2(), np.array([5, 0]))
        eTo1 = Pose2(Rot2(), np.array([10, 0]))
        eTo2 = Pose2(Rot2(), np.array([15, 0]))

        eToi_list = [eTo0, eTo1, eTo2]

        # Create destination poses
        # (same three objects, but instead living in the world "w" frame)
        wTo0 = Pose2(R180, np.array([-10, 0]))
        wTo1 = Pose2(R180, np.array([-5, 0]))
        wTo2 = Pose2(R180, np.array([0, 0]))

        wToi_list = [wTo0, wTo1, wTo2]

        we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))

        # Recover the transformation wSe (i.e. world_S_egovehicle)
        wSe = Similarity2.Align(we_pairs)

        for wToi, eToi in zip(wToi_list, eToi_list):
            self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))

    def test_align_poses_along_straight_line_gauge(self):
        """Test if Align Pose2Pairs method can account for gauge ambiguity.

        Scenario:
            3 object poses
            with gauge ambiguity (2x scale)
            world frame has poses rotated by 90 degrees.
            world and egovehicle frame translated by 11 meters w.r.t. each other
        """
        R90 = Rot2.fromDegrees(90)

        # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
        # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
        eTo0 = Pose2(Rot2(), np.array([1, 0]))
        eTo1 = Pose2(Rot2(), np.array([2, 0]))
        eTo2 = Pose2(Rot2(), np.array([4, 0]))

        eToi_list = [eTo0, eTo1, eTo2]

        # Create destination poses
        # (same three objects, but instead living in the world/city "w" frame)
        wTo0 = Pose2(R90, np.array([0, 12]))
        wTo1 = Pose2(R90, np.array([0, 14]))
        wTo2 = Pose2(R90, np.array([0, 18]))

        wToi_list = [wTo0, wTo1, wTo2]

        we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))

        # Recover the transformation wSe (i.e. world_S_egovehicle)
        wSe = Similarity2.Align(we_pairs)

        for wToi, eToi in zip(wToi_list, eToi_list):
            self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))

    def test_align_poses_scaled_squares(self):
        """Test if Align Pose2Pairs method can account for gauge ambiguity.

        Make sure a big and small square can be aligned.
        The u's represent a big square (10x10), and the v's represent a small square (4x4).

        Scenario:
            4 object poses
            with gauge ambiguity (2.5x scale)
        """
        # 0, 90, 180, and 270 degrees yaw
        R0 = Rot2.fromDegrees(0)
        R90 = Rot2.fromDegrees(90)
        R180 = Rot2.fromDegrees(180)
        R270 = Rot2.fromDegrees(270)

        aTi0 = Pose2(R0, np.array([2, 3]))
        aTi1 = Pose2(R90, np.array([12, 3]))
        aTi2 = Pose2(R180, np.array([12, 13]))
        aTi3 = Pose2(R270, np.array([2, 13]))

        aTi_list = [aTi0, aTi1, aTi2, aTi3]

        bTi0 = Pose2(R0, np.array([4, 3]))
        bTi1 = Pose2(R90, np.array([8, 3]))
        bTi2 = Pose2(R180, np.array([8, 7]))
        bTi3 = Pose2(R270, np.array([4, 7]))

        bTi_list = [bTi0, bTi1, bTi2, bTi3]

        ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list)))

        # Recover the transformation aSb
        aSb = Similarity2.Align(ab_pairs)

        for aTi, bTi in zip(aTi_list, bTi_list):
            self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))

    def test_constructor(self) -> None:
        """Sim(2) to perform p_b = bSa * p_a"""
        bRa = Rot2()
        bta = np.array([1, 2])
        bsa = 3.0
        bSa = Similarity2(R=bRa, t=bta, s=bsa)
        self.assertIsInstance(bSa, Similarity2)
        np.testing.assert_allclose(bSa.rotation().matrix(), bRa.matrix())
        np.testing.assert_allclose(bSa.translation(), bta)
        np.testing.assert_allclose(bSa.scale(), bsa)

    def test_is_eq(self) -> None:
        """Ensure object equality works properly (are equal)."""
        bSa = Similarity2(R=Rot2(), t=np.array([1, 2]), s=3.0)
        bSa_ = Similarity2(R=Rot2(), t=np.array([1.0, 2.0]), s=3)
        self.gtsamAssertEquals(bSa, bSa_)

    def test_not_eq_translation(self) -> None:
        """Ensure object equality works properly (not equal translation)."""
        bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
        bSa_ = Similarity2(R=Rot2(), t=np.array([1.0, 2.0]), s=3)
        self.assertNotEqual(bSa, bSa_)

    def test_not_eq_rotation(self) -> None:
        """Ensure object equality works properly (not equal rotation)."""
        bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
        bSa_ = Similarity2(R=Rot2.fromDegrees(180), t=np.array([2.0, 1.0]), s=3)
        self.assertNotEqual(bSa, bSa_)

    def test_not_eq_scale(self) -> None:
        """Ensure object equality works properly (not equal scale)."""
        bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
        bSa_ = Similarity2(R=Rot2(), t=np.array([2.0, 1.0]), s=1.0)
        self.assertNotEqual(bSa, bSa_)

    def test_rotation(self) -> None:
        """Ensure rotation component is returned properly."""
        R = Rot2.fromDegrees(90)
        t = np.array([1, 2])
        bSa = Similarity2(R=R, t=t, s=3.0)

        # evaluates to [[0, -1], [1, 0]]
        expected_R = Rot2.fromDegrees(90)
        np.testing.assert_allclose(expected_R.matrix(), bSa.rotation().matrix())

    def test_translation(self) -> None:
        """Ensure translation component is returned properly."""
        R = Rot2.fromDegrees(90)
        t = np.array([1, 2])
        bSa = Similarity2(R=R, t=t, s=3.0)

        expected_t = np.array([1, 2])
        np.testing.assert_allclose(expected_t, bSa.translation())

    def test_scale(self) -> None:
        """Ensure the scale factor is returned properly."""
        bRa = Rot2()
        bta = np.array([1, 2])
        bsa = 3.0
        bSa = Similarity2(R=bRa, t=bta, s=bsa)
        self.assertEqual(bSa.scale(), 3.0)


if __name__ == "__main__":
    unittest.main()
@@ -34,8 +34,10 @@ class TestTranslationRecovery(unittest.TestCase):

    def test_constructor(self):
        """Construct from binary measurements."""
        algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3())
        algorithm = gtsam.TranslationRecovery()
        self.assertIsInstance(algorithm, gtsam.TranslationRecovery)
        algorithm_params = gtsam.TranslationRecovery(gtsam.LevenbergMarquardtParams())
        self.assertIsInstance(algorithm_params, gtsam.TranslationRecovery)

    def test_run(self):
        gt_poses = ExampleValues()

@@ -45,9 +47,9 @@ class TestTranslationRecovery(unittest.TestCase):
        lmParams = gtsam.LevenbergMarquardtParams()
        lmParams.setVerbosityLM("silent")

        algorithm = gtsam.TranslationRecovery(measurements, lmParams)
        algorithm = gtsam.TranslationRecovery(lmParams)
        scale = 2.0
        result = algorithm.run(scale)
        result = algorithm.run(measurements, scale)

        w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation()
        w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation()
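Both hunks above reflect the same API change: the measurements moved from the TranslationRecovery constructor to run(). A sketch of the new call pattern, where `measurements` is assumed to be an existing BinaryMeasurementsUnit3:

```python
# New API: construct once (optionally with LM params), pass measurements to run().
import gtsam

lm_params = gtsam.LevenbergMarquardtParams()
algorithm = gtsam.TranslationRecovery(lm_params)  # or gtsam.TranslationRecovery()

# Before this change: gtsam.TranslationRecovery(measurements, lm_params).run(scale)
result = algorithm.run(measurements, 2.0)  # scale = 2.0
```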
@@ -8,26 +8,17 @@ See LICENSE for the license information
Test Triangulation
Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert
"""
# pylint: disable=no-name-in-module, invalid-name, no-member
import unittest
from typing import Iterable, List, Optional, Tuple, Union

import numpy as np

import gtsam
from gtsam import (
    Cal3_S2,
    Cal3Bundler,
    CameraSetCal3_S2,
    CameraSetCal3Bundler,
    PinholeCameraCal3_S2,
    PinholeCameraCal3Bundler,
    Point2,
    Point2Vector,
    Point3,
    Pose3,
    Pose3Vector,
    Rot3,
)
from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
                   CameraSetCal3Bundler, PinholeCameraCal3_S2,
                   PinholeCameraCal3Bundler, Point2, Point2Vector, Point3,
                   Pose3, Pose3Vector, Rot3, TriangulationParameters,
                   TriangulationResult)
from gtsam.utils.test_case import GtsamTestCase

UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
@@ -218,6 +209,68 @@ class TestTriangulationExample(GtsamTestCase):
        # using the Huber loss we now have a quite small error!! nice!
        self.assertTrue(np.allclose(landmark, actual4, atol=0.05))

    def test_outliers_and_far_landmarks(self) -> None:
        """Check safe triangulation function."""
        pose1, pose2 = self.poses

        K1 = Cal3_S2(1500, 1200, 0, 640, 480)
        # create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
        camera1 = PinholeCameraCal3_S2(pose1, K1)

        # create second camera 1 meter to the right of first camera
        K2 = Cal3_S2(1600, 1300, 0, 650, 440)
        camera2 = PinholeCameraCal3_S2(pose2, K2)

        # 1. Project two landmarks into two cameras and triangulate
        z1 = camera1.project(self.landmark)
        z2 = camera2.project(self.landmark)

        cameras = CameraSetCal3_S2()
        measurements = Point2Vector()

        cameras.append(camera1)
        cameras.append(camera2)
        measurements.append(z1)
        measurements.append(z2)

        landmarkDistanceThreshold = 10  # landmark is closer than that
        # all default except landmarkDistanceThreshold:
        params = TriangulationParameters(1.0, False, landmarkDistanceThreshold)
        actual: TriangulationResult = gtsam.triangulateSafe(
            cameras, measurements, params)
        self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2)
        self.assertTrue(actual.valid())

        landmarkDistanceThreshold = 4  # landmark is farther than that
        params2 = TriangulationParameters(
            1.0, False, landmarkDistanceThreshold)
        actual = gtsam.triangulateSafe(cameras, measurements, params2)
        self.assertTrue(actual.farPoint())

        # 3. Add a slightly rotated third camera above with a wrong measurement
        # (OUTLIER)
        pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1))
        K3 = Cal3_S2(700, 500, 0, 640, 480)
        camera3 = PinholeCameraCal3_S2(pose3, K3)
        z3 = camera3.project(self.landmark)

        cameras.append(camera3)
        measurements.append(z3 + Point2(10, -10))

        landmarkDistanceThreshold = 10  # landmark is closer than that
        outlierThreshold = 100  # loose, the outlier is going to pass
        params3 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
                                          outlierThreshold)
        actual = gtsam.triangulateSafe(cameras, measurements, params3)
        self.assertTrue(actual.valid())

        # now set stricter threshold for outlier rejection
        outlierThreshold = 5  # tighter, the outlier is not going to pass
        params4 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
                                          outlierThreshold)
        actual = gtsam.triangulateSafe(cameras, measurements, params4)
        self.assertTrue(actual.outlier())


if __name__ == "__main__":
    unittest.main()
@@ -18,7 +18,7 @@ import numpy as np
from gtsam import Rot3
from gtsam.utils.test_case import GtsamTestCase

from gtsam.utils.logging_optimizer import gtsam_optimize
from gtsam.utils.logging_optimizer import gtsam_optimize, optimize_using

KEY = 0
MODEL = gtsam.noiseModel.Unit.Create(3)
@@ -34,19 +34,20 @@ class TestOptimizeComet(GtsamTestCase):
        rotations = {R, R.inverse()}  # mean is the identity
        self.expected = Rot3()

        graph = gtsam.NonlinearFactorGraph()
        for R in rotations:
            graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
        initial = gtsam.Values()
        initial.insert(KEY, R)
        self.params = gtsam.GaussNewtonParams()
        self.optimizer = gtsam.GaussNewtonOptimizer(
            graph, initial, self.params)
        def check(actual):
            # Check that optimizing yields the identity
            self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
            # Check that logging output prints out 3 lines (exact intermediate values differ by OS)
            self.assertEqual(self.capturedOutput.getvalue().count('\n'), 3)
            # reset stdout catcher
            self.capturedOutput.truncate(0)
        self.check = check

        self.lmparams = gtsam.LevenbergMarquardtParams()
        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
            graph, initial, self.lmparams
        )
        self.graph = gtsam.NonlinearFactorGraph()
        for R in rotations:
            self.graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
        self.initial = gtsam.Values()
        self.initial.insert(KEY, R)

        # setup output capture
        self.capturedOutput = StringIO()
@@ -63,22 +64,29 @@ class TestOptimizeComet(GtsamTestCase):
        def hook(_, error):
            print(error)

        # Only thing we require from optimizer is an iterate method
        gtsam_optimize(self.optimizer, self.params, hook)

        # Check that optimizing yields the identity.
        actual = self.optimizer.values()
        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
        # Wrapper function sets the hook and calls optimizer.optimize() for us.
        params = gtsam.GaussNewtonParams()
        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial)
        self.check(actual)
        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial, params)
        self.check(actual)
        actual = gtsam_optimize(gtsam.GaussNewtonOptimizer(self.graph, self.initial, params),
                                params, hook)
        self.check(actual)

    def test_lm_simple_printing(self):
        """Make sure we are properly terminating LM"""
        def hook(_, error):
            print(error)

        gtsam_optimize(self.lmoptimizer, self.lmparams, hook)

        actual = self.lmoptimizer.values()
        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
        params = gtsam.LevenbergMarquardtParams()
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial)
        self.check(actual)
        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial,
                                params)
        self.check(actual)
        actual = gtsam_optimize(gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial, params),
                                params, hook)

    @unittest.skip("Not a test we want run every time, as needs comet.ml account")
    def test_comet(self):
@@ -6,6 +6,53 @@ Author: Jing Wu and Frank Dellaert

from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
import gtsam
from typing import Any, Callable

OPTIMIZER_PARAMS_MAP = {
    gtsam.GaussNewtonOptimizer: gtsam.GaussNewtonParams,
    gtsam.LevenbergMarquardtOptimizer: gtsam.LevenbergMarquardtParams,
    gtsam.DoglegOptimizer: gtsam.DoglegParams,
    gtsam.GncGaussNewtonOptimizer: gtsam.GaussNewtonParams,
    gtsam.GncLMOptimizer: gtsam.LevenbergMarquardtParams
}


def optimize_using(OptimizerClass, hook, *args) -> gtsam.Values:
    """ Wraps the constructor and "optimize()" call for an Optimizer together and adds an iteration
        hook.
    Example usage:
        ```python
        def hook(optimizer, error):
            print("iteration {:}, error = {:}".format(optimizer.iterations(), error))
        solution = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, init, params)
        ```
    Iteration hook's args are (optimizer, error) and return type should be None

    Args:
        OptimizerClass (T): A NonlinearOptimizer class (e.g. GaussNewtonOptimizer,
            LevenbergMarquardtOptimizer)
        hook ([T, double] -> None): Function to callback after each iteration. Args are (optimizer,
            error) and return should be None.
        *args: Arguments that would be passed into the OptimizerClass constructor, usually:
            graph, init, [params]
    Returns:
        (gtsam.Values): A Values object representing the optimization solution.
    """
    # Add the iteration hook to the NonlinearOptimizerParams
    for arg in args:
        if isinstance(arg, gtsam.NonlinearOptimizerParams):
            arg.iterationHook = lambda iteration, error_before, error_after: hook(
                optimizer, error_after)
            break
    else:
        params = OPTIMIZER_PARAMS_MAP[OptimizerClass]()
        params.iterationHook = lambda iteration, error_before, error_after: hook(
            optimizer, error_after)
        args = (*args, params)
    # Construct Optimizer and optimize
    optimizer = OptimizerClass(*args)
    hook(optimizer, optimizer.error())  # Call hook once with init values to match behavior below
    return optimizer.optimize()


def optimize(optimizer, check_convergence, hook):
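A sketch of the two call patterns `optimize_using` supports, assuming an existing `graph` and `init`; when no params are passed, the for/else branch above builds defaults from OPTIMIZER_PARAMS_MAP:

```python
from gtsam.utils.logging_optimizer import optimize_using
import gtsam

def hook(optimizer, error):
    print(optimizer.iterations(), error)

# 1. Explicit params: the iteration hook is attached to the params you pass.
params = gtsam.LevenbergMarquardtParams()
solution = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, graph, init, params)

# 2. No params: defaults are looked up in OPTIMIZER_PARAMS_MAP and appended.
solution = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, graph, init)
```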
@@ -21,7 +68,8 @@ def optimize(optimizer, check_convergence, hook):
    current_error = optimizer.error()
    hook(optimizer, current_error)

    # Iterative loop
    # Iterative loop. Cannot use `params.iterationHook` because we don't have access to params
    # (backwards compatibility issue).
    while True:
        # Do next iteration
        optimizer.iterate()
@@ -36,6 +84,7 @@ def gtsam_optimize(optimizer,
                   params,
                   hook):
    """ Given an optimizer and params, iterate until convergence.
    Recommend using optimize_using instead.
    After each iteration, hook(optimizer) is called.
    After the function, use values and errors to get the result.
    Arguments:
|
@ -12,13 +12,26 @@ from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
|
|||
import gtsam
|
||||
from gtsam import Marginals, Point2, Point3, Pose2, Pose3, Values
|
||||
|
||||
# For future reference: following
|
||||
# https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/
|
||||
# we have, in 2D:
|
||||
# def kk(p): return math.sqrt(-2*math.log(1-p)) # k to get p probability mass
|
||||
# def pp(k): return 1-math.exp(-float(k**2)/2.0) # p as a function of k
|
||||
# Some values:
|
||||
# k = 5 => p = 99.9996 %
|
||||
|
||||
# For translation between a scaling of the uncertainty ellipse and the
|
||||
# percentage of inliers see discussion in
|
||||
# [PR 1067](https://github.com/borglab/gtsam/pull/1067)
|
||||
# and the notebook python/gtsam/notebooks/ellipses.ipynb (needs scipy).
|
||||
#
|
||||
# In the following, the default scaling is chosen for 95% inliers, which
|
||||
# translates to the following sigma values:
|
||||
# 1D: 1.959963984540
|
||||
# 2D: 2.447746830681
|
||||
# 3D: 2.795483482915
|
||||
#
|
||||
# Further references are Stochastic Models, Estimation, and Control Vol 1 by Maybeck,
|
||||
# page 366 and https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/
|
||||
#
|
||||
# For reference, here are the inlier percentages for some sigma values:
|
||||
# 1 2 3 4 5
|
||||
# 1D 68.26895 95.44997 99.73002 99.99367 99.99994
|
||||
# 2D 39.34693 86.46647 98.88910 99.96645 99.99963
|
||||
# 3D 19.87480 73.85359 97.07091 99.88660 99.99846
|
||||
|
||||
def set_axes_equal(fignum: int) -> None:
|
||||
"""
|
||||
|
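The constants and the inlier table in the comment block above can be reproduced with scipy, exactly as in the ellipses notebook added by this PR:

```python
# Check of the 95% scaling constants quoted above.
import numpy as np
import scipy.stats

for dof in (1, 2, 3):
    k = np.sqrt(scipy.stats.chi2.ppf(0.95, df=dof))
    print(dof, k)  # 1.959963984540..., 2.447746830681..., 2.795483482915...
```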
@@ -81,9 +94,8 @@ def plot_covariance_ellipse_3d(axes,
    """
    Plots a Gaussian as an uncertainty ellipse

    Based on Maybeck Vol 1, page 366
    k=2.296 corresponds to 1 std, 68.26% of all probability
    k=11.82 corresponds to 3 std, 99.74% of all probability
    The ellipse is scaled in such a way that 95% of drawn samples are inliers.
    Derivation of the scaling factor is explained at the beginning of this file.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
@@ -94,7 +106,8 @@ def plot_covariance_ellipse_3d(axes,
        n: Defines the granularity of the ellipse. Higher values indicate finer ellipses.
        alpha: Transparency value for the plotted surface in the range [0, 1].
    """
    k = 11.82
    # this corresponds to 95%, see note above
    k = 2.795483482915
    U, S, _ = np.linalg.svd(P)

    radii = k * np.sqrt(S)
@@ -115,12 +128,48 @@ def plot_covariance_ellipse_3d(axes,
    axes.plot_surface(x, y, z, alpha=alpha, cmap='hot')


def plot_covariance_ellipse_2d(axes,
                               origin: Point2,
                               covariance: np.ndarray) -> None:
    """
    Plots a Gaussian as an uncertainty ellipse

    The ellipse is scaled in such a way that 95% of drawn samples are inliers.
    Derivation of the scaling factor is explained at the beginning of this file.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
        origin: The origin in the world frame.
        covariance: The marginal covariance matrix of the 2D point
            which will be represented as an ellipse.
    """

    w, v = np.linalg.eigh(covariance)

    # this corresponds to 95%, see note above
    k = 2.447746830681

    angle = np.arctan2(v[1, 0], v[0, 0])
    # We multiply k by 2 since k corresponds to the radius but Ellipse uses
    # the diameter.
    e1 = patches.Ellipse(origin,
                         np.sqrt(w[0]) * 2 * k,
                         np.sqrt(w[1]) * 2 * k,
                         np.rad2deg(angle),
                         fill=False)
    axes.add_patch(e1)


def plot_point2_on_axes(axes,
                        point: Point2,
                        linespec: str,
                        P: Optional[np.ndarray] = None) -> None:
    """
    Plot a 2D point on given axis `axes` with given `linespec`.
    Plot a 2D point and its corresponding uncertainty ellipse on given axis
    `axes` with given `linespec`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
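A minimal usage sketch for the new `plot_covariance_ellipse_2d` helper (axis limits chosen by hand, since `add_patch` does not autoscale):

```python
import matplotlib.pyplot as plt
import numpy as np
from gtsam.utils import plot

fig, ax = plt.subplots()
cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])  # 2x2 marginal covariance
plot.plot_covariance_ellipse_2d(ax, np.array([1.0, 2.0]), cov)
ax.set_xlim(0, 2)
ax.set_ylim(1, 3)
ax.set_aspect('equal')
plt.show()
```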
@@ -130,19 +179,7 @@ def plot_point2_on_axes(axes,
    """
    axes.plot([point[0]], [point[1]], linespec, marker='.', markersize=10)
    if P is not None:
        w, v = np.linalg.eig(P)

        # 5 sigma corresponds to 99.9996%, see note above
        k = 5.0

        angle = np.arctan2(v[1, 0], v[0, 0])
        e1 = patches.Ellipse(point,
                             np.sqrt(w[0] * k),
                             np.sqrt(w[1] * k),
                             np.rad2deg(angle),
                             fill=False)
        axes.add_patch(e1)

        plot_covariance_ellipse_2d(axes, point, P)

def plot_point2(
    fignum: int,
@@ -154,6 +191,9 @@ def plot_point2(
    """
    Plot a 2D point on given figure with given `linespec`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.

    Args:
        fignum: Integer representing the figure number to use for plotting.
        point: The point to be plotted.

@@ -182,6 +222,9 @@ def plot_pose2_on_axes(axes,
    """
    Plot a 2D pose on given axis `axes` with given `axis_length`.

    The ellipse is scaled in such a way that 95% of drawn samples are inliers,
    see `plot_covariance_ellipse_2d`.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
        pose: The pose to be plotted.

@@ -206,19 +249,7 @@ def plot_pose2_on_axes(axes,
    if covariance is not None:
        pPp = covariance[0:2, 0:2]
        gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)

        w, v = np.linalg.eig(gPp)

        # 5 sigma corresponds to 99.9996%, see note above
        k = 5.0

        angle = np.arctan2(v[1, 0], v[0, 0])
        e1 = patches.Ellipse(origin,
                             np.sqrt(w[0] * k),
                             np.sqrt(w[1] * k),
                             np.rad2deg(angle),
                             fill=False)
        axes.add_patch(e1)
        plot_covariance_ellipse_2d(axes, origin, gPp)


def plot_pose2(

@@ -231,6 +262,9 @@ def plot_pose2(
    """
    Plot a 2D pose on given figure with given `axis_length`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.

    Args:
        fignum: Integer representing the figure number to use for plotting.
        pose: The pose to be plotted.

@@ -260,6 +294,9 @@ def plot_point3_on_axes(axes,
    """
    Plot a 3D point on given axis `axes` with given `linespec`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
        point: The point to be plotted.

@@ -281,6 +318,9 @@ def plot_point3(
    """
    Plot a 3D point on given figure with given `linespec`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.

    Args:
        fignum: Integer representing the figure number to use for plotting.
        point: The point to be plotted.

@@ -355,6 +395,9 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
    """
    Plot a 3D pose on given axis `axes` with given `axis_length`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.

    Args:
        axes (matplotlib.axes.Axes): Matplotlib axes.
        point (gtsam.Point3): The point to be plotted.

@@ -397,6 +440,9 @@ def plot_pose3(
    """
    Plot a 3D pose on given figure with given `axis_length`.

    The uncertainty ellipse (if covariance is given) is scaled in such a way
    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.

    Args:
        fignum: Integer representing the figure number to use for plotting.
        pose (gtsam.Pose3): 3D pose to be plotted.
@@ -0,0 +1,59 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

ProjectionFactorRollingShutter unit tests.
Author: Yotam Stern
"""
import unittest

import numpy as np

import gtsam
import gtsam_unstable
from gtsam.utils.test_case import GtsamTestCase


pose1 = gtsam.Pose3()
pose2 = gtsam.Pose3(np.array([[ 0.9999375 ,  0.00502487,  0.00998725,  0.1       ],
                              [-0.00497488,  0.999975  , -0.00502487,  0.02      ],
                              [-0.01001225,  0.00497488,  0.9999375 ,  1.        ],
                              [ 0.        ,  0.        ,  0.        ,  1.        ]]))
point = np.array([2, 0, 15])
point_noise = gtsam.noiseModel.Diagonal.Sigmas(np.ones(2))
cal = gtsam.Cal3_S2()
body_p_sensor = gtsam.Pose3()
alpha = 0.1
measured = np.array([0.13257015, 0.0004157])


class TestProjectionFactorRollingShutter(GtsamTestCase):

    def test_constructor(self):
        '''
        test constructor for the ProjectionFactorRollingShutter
        '''
        factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal)
        factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal,
                                                               body_p_sensor)
        factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal, True, False)
        factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal, True, False,
                                                               body_p_sensor)

    def test_error(self):
        '''
        test the factor error for a specific example
        '''
        values = gtsam.Values()
        values.insert(0, pose1)
        values.insert(1, pose2)
        values.insert(2, point)
        factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal)
        self.gtsamAssertEquals(factor.error(values), np.array(0), tol=1e-9)


if __name__ == '__main__':
    unittest.main()
@@ -335,21 +335,21 @@ TEST(NonlinearFactorGraph, dot) {
      "graph {\n"
      "  size=\"5,5\";\n"
      "\n"
      "  varl1[label=\"l1\"];\n"
      "  varx1[label=\"x1\"];\n"
      "  varx2[label=\"x2\"];\n"
      "  var7782220156096217089[label=\"l1\"];\n"
      "  var8646911284551352321[label=\"x1\"];\n"
      "  var8646911284551352322[label=\"x2\"];\n"
      "\n"
      "  factor0[label=\"\", shape=point];\n"
      "  varx1--factor0;\n"
      "  var8646911284551352321--factor0;\n"
      "  factor1[label=\"\", shape=point];\n"
      "  varx1--factor1;\n"
      "  varx2--factor1;\n"
      "  var8646911284551352321--factor1;\n"
      "  var8646911284551352322--factor1;\n"
      "  factor2[label=\"\", shape=point];\n"
      "  varx1--factor2;\n"
      "  varl1--factor2;\n"
      "  var8646911284551352321--factor2;\n"
      "  var7782220156096217089--factor2;\n"
      "  factor3[label=\"\", shape=point];\n"
      "  varx2--factor3;\n"
      "  varl1--factor3;\n"
      "  var8646911284551352322--factor3;\n"
      "  var7782220156096217089--factor3;\n"
      "}\n";

  const NonlinearFactorGraph fg = createNonlinearFactorGraph();
@@ -363,21 +363,21 @@ TEST(NonlinearFactorGraph, dot_extra) {
      "graph {\n"
      "  size=\"5,5\";\n"
      "\n"
      "  varl1[label=\"l1\", pos=\"0,0!\"];\n"
      "  varx1[label=\"x1\", pos=\"1,0!\"];\n"
      "  varx2[label=\"x2\", pos=\"1,1.5!\"];\n"
      "  var7782220156096217089[label=\"l1\", pos=\"0,0!\"];\n"
      "  var8646911284551352321[label=\"x1\", pos=\"1,0!\"];\n"
      "  var8646911284551352322[label=\"x2\", pos=\"1,1.5!\"];\n"
      "\n"
      "  factor0[label=\"\", shape=point];\n"
      "  varx1--factor0;\n"
      "  var8646911284551352321--factor0;\n"
      "  factor1[label=\"\", shape=point];\n"
      "  varx1--factor1;\n"
      "  varx2--factor1;\n"
      "  var8646911284551352321--factor1;\n"
      "  var8646911284551352322--factor1;\n"
      "  factor2[label=\"\", shape=point];\n"
      "  varx1--factor2;\n"
      "  varl1--factor2;\n"
      "  var8646911284551352321--factor2;\n"
      "  var7782220156096217089--factor2;\n"
      "  factor3[label=\"\", shape=point];\n"
      "  varx2--factor3;\n"
      "  varl1--factor3;\n"
      "  var8646911284551352322--factor3;\n"
      "  var7782220156096217089--factor3;\n"
      "}\n";

  const NonlinearFactorGraph fg = createNonlinearFactorGraph();
@@ -17,8 +17,8 @@
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/sfm/TranslationRecovery.h>
#include <gtsam/sfm/SfmData.h>
#include <gtsam/sfm/TranslationRecovery.h>
#include <gtsam/slam/dataset.h>

using namespace std;
@@ -62,13 +62,13 @@ TEST(TranslationRecovery, BAL) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  TranslationRecovery algorithm;
  const auto graph = algorithm.buildGraph(relativeTranslations);
  EXPECT_LONGS_EQUAL(3, graph.size());

  // Run translation recovery
  const double scale = 2.0;
  const auto result = algorithm.run(scale);
  const auto result = algorithm.run(relativeTranslations, scale);

  // Check result for first two translations, determined by prior
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
@@ -107,12 +107,12 @@ TEST(TranslationRecovery, TwoPoseTest) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  TranslationRecovery algorithm;
  const auto graph = algorithm.buildGraph(relativeTranslations);
  EXPECT_LONGS_EQUAL(1, graph.size());

  // Run translation recovery
  const auto result = algorithm.run(/*scale=*/3.0);
  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

  // Check result for first two translations, determined by prior
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -145,11 +145,11 @@ TEST(TranslationRecovery, ThreePoseTest) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  TranslationRecovery algorithm;
  const auto graph = algorithm.buildGraph(relativeTranslations);
  EXPECT_LONGS_EQUAL(3, graph.size());

  const auto result = algorithm.run(/*scale=*/3.0);
  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -180,13 +180,9 @@ TEST(TranslationRecovery, ThreePosesIncludingZeroTranslation) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  // There is only 1 non-zero translation edge.
  EXPECT_LONGS_EQUAL(1, graph.size());

  TranslationRecovery algorithm;
  // Run translation recovery
  const auto result = algorithm.run(/*scale=*/3.0);
  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -222,12 +218,10 @@ TEST(TranslationRecovery, FourPosesIncludingZeroTranslation) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  EXPECT_LONGS_EQUAL(3, graph.size());
  TranslationRecovery algorithm;

  // Run translation recovery
  const auto result = algorithm.run(/*scale=*/4.0);
  const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -251,13 +245,10 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) {
                       unitTranslation.measured()));
  }

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  // Graph size will be zero as there are no 'non-zero distance' edges.
  EXPECT_LONGS_EQUAL(0, graph.size());
  TranslationRecovery algorithm;

  // Run translation recovery
  const auto result = algorithm.run(/*scale=*/4.0);
  const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -265,6 +256,73 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) {
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(2), 1e-8));
}

TEST(TranslationRecovery, ThreePosesWithOneSoftConstraint) {
  // Create a dataset with 3 poses.
  //  __      __
  //  \/      \/
  //  0 _____ 1
  //   \  __ /
  //    \\//
  //     3
  //
  // 0 and 1 face in the same direction but have a translation offset. 3 is in
  // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.

  Values poses;
  poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
  poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
  poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));

  auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
      poses, {{0, 1}, {0, 3}, {1, 3}});

  std::vector<BinaryMeasurement<Point3>> betweenTranslations;
  betweenTranslations.emplace_back(0, 3, Point3(1, -1, 0),
                                   noiseModel::Isotropic::Sigma(3, 1e-2));

  TranslationRecovery algorithm;
  auto result =
      algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-4));
  EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1), 1e-4));
  EXPECT(assert_equal(Point3(1, -1, 0), result.at<Point3>(3), 1e-4));
}

TEST(TranslationRecovery, ThreePosesWithOneHardConstraint) {
  // Create a dataset with 3 poses.
  //  __      __
  //  \/      \/
  //  0 _____ 1
  //   \  __ /
  //    \\//
  //     3
  //
  // 0 and 1 face in the same direction but have a translation offset. 3 is in
  // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.

  Values poses;
  poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
  poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
  poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));

  auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
      poses, {{0, 1}, {0, 3}, {1, 3}});

  std::vector<BinaryMeasurement<Point3>> betweenTranslations;
  betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0),
                                   noiseModel::Constrained::All(3, 1e2));

  TranslationRecovery algorithm;
  auto result =
      algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations);

  // Check result
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-4));
  EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1), 1e-4));
  EXPECT(assert_equal(Point3(1, -1, 0), result.at<Point3>(3), 1e-4));
}
/* ************************************************************************* */
int main() {
  TestResult tr;
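For completeness, the same soft/hard-constraint workflow sketched from Python, assuming the wrapper exposes the run() overload shown above together with the BinaryMeasurementsPoint3 binding added earlier in this PR (the element type name BinaryMeasurementPoint3 is an assumption):

```python
import gtsam

# Soft prior on the 0 -> 1 translation; use noiseModel.Constrained.All(3, 1e2)
# for the hard-constraint variant of the C++ test above.
noise = gtsam.noiseModel.Isotropic.Sigma(3, 1e-2)
between = gtsam.BinaryMeasurementsPoint3()
between.append(gtsam.BinaryMeasurementPoint3(0, 1, gtsam.Point3(2, 0, 0), noise))

algorithm = gtsam.TranslationRecovery()
# `relative_translations` assumed: a BinaryMeasurementsUnit3 of unit directions.
result = algorithm.run(relative_translations, 0.0, between)
```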
@@ -19,7 +19,7 @@ install:
      if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" }
      $env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH"
      python -W ignore -m pip install --upgrade pip wheel
      python -W ignore -m pip install pytest numpy --no-warn-script-location
      python -W ignore -m pip install pytest numpy --no-warn-script-location pytest-timeout
  - ps: |
      Start-FileDownload 'https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.zip'
      7z x eigen-3.3.7.zip -y > $null
@@ -0,0 +1,19 @@
---
# See all possible options and defaults with:
# clang-format --style=llvm --dump-config
BasedOnStyle: LLVM
AccessModifierOffset: -4
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BreakBeforeBinaryOperators: All
BreakConstructorInitializers: BeforeColon
ColumnLimit: 99
IndentCaseLabels: true
IndentPPDirectives: AfterHash
IndentWidth: 4
Language: Cpp
SpaceAfterCStyleCast: true
Standard: Cpp11
TabWidth: 4
...
@@ -1,13 +1,66 @@
FormatStyle: file

Checks: '
*bugprone*,
cppcoreguidelines-init-variables,
cppcoreguidelines-slicing,
clang-analyzer-optin.cplusplus.VirtualCall,
google-explicit-constructor,
llvm-namespace-comment,
modernize-use-override,
readability-container-size-empty,
modernize-use-using,
modernize-use-equals-default,
misc-misplaced-const,
misc-non-copyable-objects,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-uniqueptr-reset-release,
misc-unused-parameters,
modernize-avoid-bind,
modernize-make-shared,
modernize-redundant-void-arg,
modernize-replace-auto-ptr,
modernize-replace-disallow-copy-and-assign-macro,
modernize-replace-random-shuffle,
modernize-shrink-to-fit,
modernize-use-auto,
modernize-use-bool-literals,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-default-member-init,
modernize-use-noexcept,
modernize-use-emplace,
modernize-use-override,
modernize-use-using,
*performance*,
readability-avoid-const-params-in-decls,
readability-const-return-type,
readability-container-size-empty,
readability-delete-null-pointer,
readability-else-after-return,
readability-implicit-bool-conversion,
readability-make-member-function-const,
readability-misplaced-array-index,
readability-non-const-parameter,
readability-redundant-function-ptr-dereference,
readability-redundant-smartptr-get,
readability-redundant-string-cstr,
readability-simplify-subscript-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
readability-string-compare,
readability-suspicious-call-argument,
readability-uniqueptr-delete-release,
-bugprone-exception-escape,
-bugprone-reserved-identifier,
-bugprone-unused-raii,
'

CheckOptions:
- key: performance-for-range-copy.WarnOnAllAutoCopies
  value: true
- key: performance-unnecessary-value-param.AllowedTypes
  value: 'exception_ptr$;'
- key: readability-implicit-bool-conversion.AllowPointerConditions
  value: true

HeaderFilterRegex: 'pybind11/.*h'

WarningsAsErrors: '*'
@@ -0,0 +1,9 @@
*.cmake @henryiii
CMakeLists.txt @henryiii
*.yml @henryiii
*.yaml @henryiii
/tools/ @henryiii
/pybind11/ @henryiii
noxfile.py @henryiii
.clang-format @henryiii
.clang-tidy @henryiii
@ -53,6 +53,33 @@ derivative works thereof, in binary and source code form.
|
|||
|
||||
## Development of pybind11
|
||||
|
||||
### Quick setup
|
||||
|
||||
To setup a quick development environment, use [`nox`](https://nox.thea.codes).
|
||||
This will allow you to do some common tasks with minimal setup effort, but will
|
||||
take more time to run and be less flexible than a full development environment.
|
||||
If you use [`pipx run nox`](https://pipx.pypa.io), you don't even need to
|
||||
install `nox`. Examples:
|
||||
|
||||
```bash
|
||||
# List all available sessions
|
||||
nox -l
|
||||
|
||||
# Run linters
|
||||
nox -s lint
|
||||
|
||||
# Run tests on Python 3.9
|
||||
nox -s tests-3.9
|
||||
|
||||
# Build and preview docs
|
||||
nox -s docs -- serve
|
||||
|
||||
# Build SDists and wheels
|
||||
nox -s build
|
||||
```
|
||||
|
||||
### Full setup
|
||||
|
||||
To setup an ideal development environment, run the following commands on a
|
||||
system with CMake 3.14+:
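
The commands themselves fall outside the lines shown in this hunk; as a rough sketch of the usual flow (the `venv` name and the flag selection are illustrative, not the project's prescribed set):

```bash
# Hypothetical full-setup flow: virtual environment plus a test-enabled build
python3 -m venv venv
source venv/bin/activate
pip install -r tests/requirements.txt
cmake -S . -B build -DDOWNLOAD_CATCH=ON -DPYBIND11_WERROR=ON
cmake --build build -j 2
```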

@ -93,7 +120,7 @@ The valid options are:
* `-DPYBIND11_NOPYTHON=ON`: Disable all Python searching (disables tests)
* `-DBUILD_TESTING=ON`: Enable the tests
* `-DDOWNLOAD_CATCH=ON`: Download catch to build the C++ tests
* `-DOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests
* `-DDOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests
* `-DPYBIND11_INSTALL=ON/OFF`: Enable the install target (on by default for the
master project)
* `-DUSE_PYTHON_INSTALL_DIR=ON`: Try to install into the python dir

@ -126,13 +153,26 @@ cmake --build build --target check
`--target` can be spelled `-t` in CMake 3.15+. You can also run individual
tests with these targets:

* `pytest`: Python tests only
* `pytest`: Python tests only, using the
[pytest](https://docs.pytest.org/en/stable/) framework
* `cpptest`: C++ tests only
* `test_cmake_build`: Install / subdirectory tests

If you want to build just a subset of tests, use
`-DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp"`. If this is
empty, all tests will be built.
`-DPYBIND11_TEST_OVERRIDE="test_callbacks;test_pickling"`. If this is
empty, all tests will be built. Tests are specified without an extension if they need both a .py and
.cpp file.
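
For instance, a configure-and-test cycle limited to those two tests might look like this (a sketch; the `build` directory name is arbitrary):

```bash
# Build and run only the callbacks and pickling tests
cmake -S . -B build -DPYBIND11_TEST_OVERRIDE="test_callbacks;test_pickling"
cmake --build build --target pytest
```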

You may also pass flags to the `pytest` target by editing `tests/pytest.ini` or
by using the `PYTEST_ADDOPTS` environment variable
(see [`pytest` docs](https://docs.pytest.org/en/2.7.3/customize.html#adding-default-options)). As an example:

```bash
env PYTEST_ADDOPTS="--capture=no --exitfirst" \
    cmake --build build --target pytest
# Or using abbreviated flags
env PYTEST_ADDOPTS="-s -x" cmake --build build --target pytest
```

### Formatting

@ -164,16 +204,42 @@ name, pre-commit):
pre-commit install
```

### Clang-Tidy
### Clang-Format

To run Clang tidy, the following recipe should work. Files will be modified in
place, so you can use git to monitor the changes.
As of v2.6.2, pybind11 ships with a [`clang-format`][clang-format]
configuration file at the top level of the repo (the filename is
`.clang-format`). Currently, formatting is NOT applied automatically, but
manually using `clang-format` for newly developed files is highly encouraged.
To check if a file needs formatting:

```bash
docker run --rm -v $PWD:/pybind11 -it silkeh/clang:10
apt-get update && apt-get install python3-dev python3-pytest
cmake -S pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);-fix"
cmake --build build
clang-format -style=file --dry-run some.cpp
```

The output will show things to be fixed, if any. To actually format the file:

```bash
clang-format -style=file -i some.cpp
```

Note that the `-style=file` option searches the parent directories for the
`.clang-format` file, i.e. the commands above can be run in any subdirectory
of the pybind11 repo.
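
To sweep everything you have touched in one go, something like the following works (a sketch, assuming GNU `xargs` and that `master` is the comparison branch):

```bash
# Reformat every C++ file changed relative to master
git diff --name-only master -- '*.cpp' '*.h' | xargs -r clang-format -style=file -i
```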

### Clang-Tidy

[`clang-tidy`][clang-tidy] performs deeper static code analyses and is
more complex to run, compared to `clang-format`, but support for `clang-tidy`
is built into the pybind11 CMake configuration. To run `clang-tidy`, the
following recipe should work. Run the `docker` command from the top-level
directory inside your pybind11 git clone. Files will be modified in place,
so you can use git to monitor the changes.

```bash
docker run --rm -v $PWD:/mounted_pybind11 -it silkeh/clang:12
apt-get update && apt-get install -y python3-dev python3-pytest
cmake -S /mounted_pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);-fix" -DDOWNLOAD_EIGEN=ON -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=17
cmake --build build -j 2 -- --keep-going
```
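
Because `-fix` rewrites the sources in place, reviewing the result afterwards is straightforward (a sketch; the file path is a placeholder):

```bash
# See what clang-tidy changed, and drop any unwanted edit
git diff
git checkout -- path/to/unwanted_change.cpp
```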

### Include what you use

@ -186,7 +252,7 @@ cmake -S . -B build-iwyu -DCMAKE_CXX_INCLUDE_WHAT_YOU_USE=$(which include-what-y
cmake --build build
```

The report is sent to stderr; you can pip it into a file if you wish.
The report is sent to stderr; you can pipe it into a file if you wish.
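
Concretely, that redirection looks like this (the log file name is arbitrary):

```bash
# Capture the include-what-you-use report, which is written to stderr
cmake --build build 2> iwyu-report.txt
```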

### Build recipes

@ -313,6 +379,8 @@ if you really want to.

[pre-commit]: https://pre-commit.com
[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
[clang-tidy]: https://clang.llvm.org/extra/clang-tidy/
[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/latest
[issue tracker]: https://github.com/pybind/pybind11/issues
[gitter]: https://gitter.im/pybind/Lobby

@ -1,28 +0,0 @@
---
name: Bug Report
about: File an issue about a bug
title: "[BUG] "
---

Make sure you've completed the following steps before submitting your issue -- thank you!

1. Make sure you've read the [documentation][]. Your issue may be addressed there.
2. Search the [issue tracker][] to verify that this hasn't already been reported. +1 or comment there if it has.
3. Consider asking first in the [Gitter chat room][].
4. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.
a. If possible, make a PR with a new, failing test to give us a starting point to work on!

[documentation]: https://pybind11.readthedocs.io
[issue tracker]: https://github.com/pybind/pybind11/issues
[Gitter chat room]: https://gitter.im/pybind/Lobby

*After reading, remove this checklist and the template text in parentheses below.*

## Issue description

(Provide a short description, state the expected behavior and what actually happens.)

## Reproducible example code

(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)

@ -0,0 +1,45 @@
name: Bug Report
description: File an issue about a bug
title: "[BUG]: "
labels: [triage]
body:
- type: markdown
attributes:
value: |
Maintainers will only make a best effort to triage PRs. Please do your best to make the issue as easy to act on as possible, and only open if clearly a problem with pybind11 (ask first if unsure).
- type: checkboxes
id: steps
attributes:
label: Required prerequisites
description: Make sure you've completed the following steps before submitting your issue -- thank you!
options:
- label: Make sure you've read the [documentation](https://pybind11.readthedocs.io). Your issue may be addressed there.
required: true
- label: Search the [issue tracker](https://github.com/pybind/pybind11/issues) and [Discussions](https:/pybind/pybind11/discussions) to verify that this hasn't already been reported. +1 or comment there if it has.
required: true
- label: Consider asking first in the [Gitter chat room](https://gitter.im/pybind/Lobby) or in a [Discussion](https:/pybind/pybind11/discussions/new).
required: false

- type: textarea
id: description
attributes:
label: Problem description
placeholder: >-
Provide a short description, state the expected behavior and what
actually happens. Include relevant information like what version of
pybind11 you are using, what system you are on, and any useful commands
/ output.
validations:
required: true

- type: textarea
id: code
attributes:
label: Reproducible example code
placeholder: >-
The code should be minimal, have no external dependencies, isolate the
function(s) that cause breakage. Submit matched and complete C++ and
Python snippets that can be easily compiled and run to diagnose the
issue. If possible, make a PR with a new, failing test to give us a
starting point to work on!
render: text

@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Ask a question
url: https://github.com/pybind/pybind11/discussions/new
about: Please ask and answer questions here, or propose new ideas.
- name: Gitter room
url: https://gitter.im/pybind/Lobby
about: A room for discussing pybind11 with an active community

@ -1,16 +0,0 @@
---
name: Feature Request
about: File an issue about adding a feature
title: "[FEAT] "
---

Make sure you've completed the following steps before submitting your issue -- thank you!

1. Check if your feature has already been mentioned / rejected / planned in other issues.
2. If those resources didn't help, consider asking in the [Gitter chat room][] to see if this is interesting / useful to a larger audience and possible to implement reasonably,
4. If you have a useful feature that passes the previous items (or not suitable for chat), please fill in the details below.

[Gitter chat room]: https://gitter.im/pybind/Lobby

*After reading, remove this checklist.*

@ -1,21 +0,0 @@
---
name: Question
about: File an issue about unexplained behavior
title: "[QUESTION] "
---

If you have a question, please check the following first:

1. Check if your question has already been answered in the [FAQ][] section.
2. Make sure you've read the [documentation][]. Your issue may be addressed there.
3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][]
4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer.
5. If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible.

[FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html
[documentation]: https://pybind11.readthedocs.io
[issue tracker]: https://github.com/pybind/pybind11/issues
[Gitter chat room]: https://gitter.im/pybind/Lobby

*After reading, remove this checklist.*

@ -0,0 +1,16 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
ignore:
# Official actions have moving tags like v1
# that are used, so they don't need updates here
- dependency-name: "actions/checkout"
- dependency-name: "actions/setup-python"
- dependency-name: "actions/cache"
- dependency-name: "actions/upload-artifact"
- dependency-name: "actions/download-artifact"
- dependency-name: "actions/labeler"

@ -0,0 +1,8 @@
docs:
- any:
- 'docs/**/*.rst'
- '!docs/changelog.rst'
- '!docs/upgrade.rst'

ci:
- '.github/workflows/*.yml'

@ -0,0 +1,3 @@
needs changelog:
- all:
- '!docs/changelog.rst'

@ -0,0 +1,19 @@
<!--
Title (above): please place [branch_name] at the beginning if you are targeting a branch other than master. *Do not target stable*.
It is recommended to use conventional commit format, see conventionalcommits.org, but not required.
-->
## Description

<!-- Include relevant issues or PRs here, describe what changed and why -->

## Suggested changelog entry:

<!-- Fill in the below block with the expected RestructuredText entry. Delete if no entry needed;
but do not delete header or rst block if an entry is needed! Will be collected via a script. -->

```rst

```

<!-- If the upgrade guide needs updating, note that here too -->

@ -9,6 +9,13 @@ on:
- stable
- v*

concurrency:
group: test-${{ github.ref }}
cancel-in-progress: true

env:
PIP_ONLY_BINARY: numpy

jobs:
# This is the "main" test suite, which tests a large number of different
# versions of default compilers and Python versions in GitHub Actions.

@ -16,71 +23,42 @@ jobs:
strategy:
fail-fast: false
matrix:
runs-on: [ubuntu-latest, windows-latest, macos-latest]
arch: [x64]
runs-on: [ubuntu-latest, windows-2022, macos-latest]
python:
- 2.7
- 3.5
- 3.8
- pypy2
- pypy3
- '2.7'
- '3.5'
- '3.6'
- '3.9'
- '3.10'
- 'pypy-3.7-v7.3.7'
- 'pypy-3.8-v7.3.7'

# Items in here will either be added to the build matrix (if not
# present), or add new keys to an existing matrix element if all the
# existing keys match.
#
# We support three optional keys: args (both build), args1 (first
# build), and args2 (second build).
# We support an optional key: args, for cmake args
include:
# Just add a key
- runs-on: ubuntu-latest
python: 3.6
arch: x64
python: '3.6'
args: >
-DPYBIND11_FINDPYTHON=ON
- runs-on: windows-2016
python: 3.7
arch: x86
args2: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
-DCMAKE_CXX_FLAGS="-D_=1"
- runs-on: windows-latest
python: 3.6
arch: x64
python: '3.6'
args: >
-DPYBIND11_FINDPYTHON=ON
- runs-on: windows-latest
python: 3.7
arch: x64

- runs-on: ubuntu-latest
python: 3.9-dev
arch: x64
- runs-on: macos-latest
python: 3.9-dev
arch: x64
args: >
-DPYBIND11_FINDPYTHON=ON
python: 'pypy-2.7'
# Inject a couple Windows 2019 runs
- runs-on: windows-2019
python: '3.9'
- runs-on: windows-2019
python: '2.7'

# These items will be removed from the build matrix, keys must match.
exclude:
# Currently 32bit only, and we build 64bit
- runs-on: windows-latest
python: pypy2
arch: x64
- runs-on: windows-latest
python: pypy3
arch: x64

# Currently broken on embed_test
- runs-on: windows-latest
python: 3.8
arch: x64
- runs-on: windows-latest
python: 3.9-dev
arch: x64

name: "🐍 ${{ matrix.python }} • ${{ matrix.runs-on }} • ${{ matrix.arch }} ${{ matrix.args }}"
name: "🐍 ${{ matrix.python }} • ${{ matrix.runs-on }} • x64 ${{ matrix.args }}"
runs-on: ${{ matrix.runs-on }}
continue-on-error: ${{ endsWith(matrix.python, 'dev') }}

steps:
- uses: actions/checkout@v2

@ -89,13 +67,18 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
architecture: ${{ matrix.arch }}

- name: Setup Boost (Windows / Linux latest)
run: echo "::set-env name=BOOST_ROOT::$BOOST_ROOT_1_72_0"
- name: Setup Boost (Linux)
# Can't use boost + define _
if: runner.os == 'Linux' && matrix.python != '3.6'
run: sudo apt-get install libboost-dev

- name: Setup Boost (macOS)
if: runner.os == 'macOS'
run: brew install boost

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.3
uses: jwlawson/actions-setup-cmake@v1.12

- name: Cache wheels
if: runner.os == 'macOS'

@ -106,10 +89,11 @@ jobs:
# for ways to do this more generally
path: ~/Library/Caches/pip
# Look to see if there is a cache hit for the corresponding requirements file
key: ${{ runner.os }}-pip-${{ matrix.python }}-${{ matrix.arch }}-${{ hashFiles('tests/requirements.txt') }}
key: ${{ runner.os }}-pip-${{ matrix.python }}-x64-${{ hashFiles('tests/requirements.txt') }}

- name: Prepare env
run: python -m pip install -r tests/requirements.txt --prefer-binary
run: |
python -m pip install -r tests/requirements.txt

- name: Setup annotations on Linux
if: runner.os == 'Linux'

@ -132,6 +116,8 @@ jobs:
run: cmake --build . --target pytest -j 2

- name: C++11 tests
# TODO: Figure out how to load the DLL on Python 3.8+
if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
run: cmake --build . --target cpptest -j 2

- name: Interface test C++11

@ -141,7 +127,7 @@ jobs:
run: git clean -fdx

# Second build - C++17 mode and in a build directory
- name: Configure ${{ matrix.args2 }}
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON

@ -149,7 +135,6 @@ jobs:
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
${{ matrix.args }}
${{ matrix.args2 }}

- name: Build
run: cmake --build build2 -j 2

@ -158,8 +143,28 @@ jobs:
run: cmake --build build2 --target pytest

- name: C++ tests
# TODO: Figure out how to load the DLL on Python 3.8+
if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
run: cmake --build build2 --target cpptest

# Third build - C++17 mode with unstable ABI
- name: Configure (unstable ABI)
run: >
cmake -S . -B build3
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
"-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
${{ matrix.args }}

- name: Build (unstable ABI)
run: cmake --build build3 -j 2

- name: Python tests (unstable ABI)
run: cmake --build build3 --target pytest

- name: Interface test
run: cmake --build build2 --target test_cmake_build

@ -167,21 +172,105 @@ jobs:
# MSVC, but for now, this action works:
- name: Prepare compiler environment for Windows 🐍 2.7
if: matrix.python == 2.7 && runner.os == 'Windows'
uses: ilammy/msvc-dev-cmd@v1
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
arch: x64

# This makes two environment variables available in the following step(s)
- name: Set Windows 🐍 2.7 environment variables
if: matrix.python == 2.7 && runner.os == 'Windows'
shell: bash
run: |
echo "::set-env name=DISTUTILS_USE_SDK::1"
echo "::set-env name=MSSdk::1"
echo "DISTUTILS_USE_SDK=1" >> $GITHUB_ENV
echo "MSSdk=1" >> $GITHUB_ENV

# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
run: pytest tests/extra_setuptools
if: "!(matrix.python == '3.5' && matrix.runs-on == 'windows-2022')"


deadsnakes:
strategy:
fail-fast: false
matrix:
include:
# TODO: Fails on 3.10, investigate
- python-version: "3.9"
python-debug: true
valgrind: true
# - python-version: "3.11-dev"
# python-debug: false

name: "🐍 ${{ matrix.python-version }}${{ matrix.python-debug && '-dbg' || '' }} (deadsnakes)${{ matrix.valgrind && ' • Valgrind' || '' }} • x64"
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2

- name: Setup Python ${{ matrix.python-version }} (deadsnakes)
uses: deadsnakes/action@v2.1.1
with:
python-version: ${{ matrix.python-version }}
debug: ${{ matrix.python-debug }}

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Valgrind cache
if: matrix.valgrind
uses: actions/cache@v2
id: cache-valgrind
with:
path: valgrind
key: 3.16.1 # Valgrind version

- name: Compile Valgrind
if: matrix.valgrind && steps.cache-valgrind.outputs.cache-hit != 'true'
run: |
VALGRIND_VERSION=3.16.1
curl https://sourceware.org/pub/valgrind/valgrind-$VALGRIND_VERSION.tar.bz2 -o - | tar xj
mv valgrind-$VALGRIND_VERSION valgrind
cd valgrind
./configure
make -j 2 > /dev/null

- name: Install Valgrind
if: matrix.valgrind
working-directory: valgrind
run: |
sudo make install
sudo apt-get update
sudo apt-get install libc6-dbg # Needed by Valgrind

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

- name: Configure
env:
SETUPTOOLS_USE_DISTUTILS: stdlib
run: >
cmake -S . -B build
-DCMAKE_BUILD_TYPE=Debug
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17

- name: Build
run: cmake --build build -j 2

- name: Python tests
run: cmake --build build --target pytest

- name: C++ tests
run: cmake --build build --target cpptest

- name: Run Valgrind on Python tests
if: matrix.valgrind
run: cmake --build build --target memcheck


# Testing on clang using the excellent silkeh clang docker images

@ -194,12 +283,20 @@ jobs:
- 3.6
- 3.7
- 3.9
- 5
- 7
- 9
- dev
std:
- 11
include:
- clang: 5
std: 14
- clang: 10
std: 20
- clang: 10
std: 17

name: "🐍 3 • Clang ${{ matrix.clang }} • x64"
name: "🐍 3 • Clang ${{ matrix.clang }} • C++${{ matrix.std }} • x64"
container: "silkeh/clang:${{ matrix.clang }}"

steps:

@ -214,6 +311,7 @@ jobs:
cmake -S . -B build
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")

- name: Build

@ -252,50 +350,54 @@ jobs:
run: cmake --build build --target pytest


# Testing CentOS 8 + PGI compilers
centos-nvhpc8:
runs-on: ubuntu-latest
name: "🐍 3 • CentOS8 / PGI 20.7 • x64"
container: centos:8

steps:
- uses: actions/checkout@v2

- name: Add Python 3 and a few requirements
run: yum update -y && yum install -y git python3-devel python3-numpy python3-pytest make environment-modules

- name: Install CMake with pip
run: |
python3 -m pip install --upgrade pip
python3 -m pip install cmake --prefer-binary

- name: Install NVidia HPC SDK
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/nvhpc-20-7-20.7-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/nvhpc-2020-20.7-1.x86_64.rpm

- name: Configure
shell: bash
run: |
source /etc/profile.d/modules.sh
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.7
cmake -S . -B build -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=14 -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")

- name: Build
run: cmake --build build -j 2 --verbose

- name: Python tests
run: cmake --build build --target pytest

- name: C++ tests
run: cmake --build build --target cpptest

- name: Interface test
run: cmake --build build --target test_cmake_build
# TODO: Internal compiler error - report to NVidia
# # Testing CentOS 8 + PGI compilers
# centos-nvhpc8:
# runs-on: ubuntu-latest
# name: "🐍 3 • CentOS8 / PGI 20.11 • x64"
# container: centos:8
#
# steps:
# - uses: actions/checkout@v2
#
# - name: Add Python 3 and a few requirements
# run: yum update -y && yum install -y git python3-devel python3-numpy python3-pytest make environment-modules
#
# - name: Install CMake with pip
# run: |
# python3 -m pip install --upgrade pip
# python3 -m pip install cmake --prefer-binary
#
# - name: Install NVidia HPC SDK
# run: >
# yum -y install
# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-20-11-20.11-1.x86_64.rpm
# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-2020-20.11-1.x86_64.rpm
#
# - name: Configure
# shell: bash
# run: |
# source /etc/profile.d/modules.sh
# module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.11
# cmake -S . -B build -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=14 -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
#
# - name: Build
# run: cmake --build build -j 2 --verbose
#
# - name: Python tests
# run: cmake --build build --target pytest
#
# - name: C++ tests
# run: cmake --build build --target cpptest
#
# - name: Interface test
# run: cmake --build build --target test_cmake_build


# Testing on CentOS 7 + PGI compilers, which seems to require more workarounds
centos-nvhpc7:
runs-on: ubuntu-latest
name: "🐍 3 • CentOS7 / PGI 20.7 • x64"
name: "🐍 3 • CentOS7 / PGI 20.9 • x64"
container: centos:7

steps:

@ -305,17 +407,17 @@ jobs:
run: yum update -y && yum install -y epel-release && yum install -y git python3-devel make environment-modules cmake3

- name: Install NVidia HPC SDK
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/nvhpc-20-7-20.7-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/nvhpc-2020-20.7-1.x86_64.rpm
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-20-9-20.9-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-2020-20.9-1.x86_64.rpm

# On CentOS 7, we have to filter a few tests (compiler internal error)
# and allow deeper templete recursion (not needed on CentOS 8 with a newer
# and allow deeper template recursion (not needed on CentOS 8 with a newer
# standard library). On some systems, you many need further workarounds:
# https://github.com/pybind/pybind11/pull/2475
- name: Configure
shell: bash
run: |
source /etc/profile.d/modules.sh
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.7
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.9
cmake3 -S . -B build -DDOWNLOAD_CATCH=ON \
-DCMAKE_CXX_STANDARD=11 \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") \

@ -340,6 +442,7 @@ jobs:
- name: Interface test
run: cmake3 --build build --target test_cmake_build


# Testing on GCC using the GCC docker images (only recent images supported)
gcc:
runs-on: ubuntu-latest

@ -349,8 +452,13 @@ jobs:
gcc:
- 7
- latest
std:
- 11
include:
- gcc: 10
std: 20

name: "🐍 3 • GCC ${{ matrix.gcc }} • x64"
name: "🐍 3 • GCC ${{ matrix.gcc }} • C++${{ matrix.std }}• x64"
container: "gcc:${{ matrix.gcc }}"

steps:

@ -362,10 +470,8 @@ jobs:
- name: Update pip
run: python3 -m pip install --upgrade pip

- name: Setup CMake 3.18
uses: jwlawson/actions-setup-cmake@v1.3
with:
cmake-version: 3.18
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Configure
shell: bash

@ -373,7 +479,7 @@ jobs:
cmake -S . -B build
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=11
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")

- name: Build

@ -389,6 +495,103 @@ jobs:
run: cmake --build build --target test_cmake_build


# Testing on ICC using the oneAPI apt repo
icc:
runs-on: ubuntu-20.04
strategy:
fail-fast: false

name: "🐍 3 • ICC latest • x64"

steps:
- uses: actions/checkout@v2

- name: Add apt repo
run: |
sudo apt-get update
sudo apt-get install -y wget build-essential pkg-config cmake ca-certificates gnupg
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list

- name: Add ICC & Python 3
run: sudo apt-get update; sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic cmake python3-dev python3-numpy python3-pytest python3-pip

- name: Update pip
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
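# setvars.sh can return a nonzero status even on success, hence the set +e / set -e bracket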
python3 -m pip install --upgrade pip

- name: Install dependencies
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
python3 -m pip install -r tests/requirements.txt

- name: Configure C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake -S . -B build-11 \
-DPYBIND11_WERROR=ON \
-DDOWNLOAD_CATCH=ON \
-DDOWNLOAD_EIGEN=OFF \
-DCMAKE_CXX_STANDARD=11 \
-DCMAKE_CXX_COMPILER=$(which icpc) \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")

- name: Build C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 -j 2 -v

- name: Python tests C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
sudo service apport stop
cmake --build build-11 --target check

- name: C++ tests C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 --target cpptest

- name: Interface test C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 --target test_cmake_build

- name: Configure C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake -S . -B build-17 \
-DPYBIND11_WERROR=ON \
-DDOWNLOAD_CATCH=ON \
-DDOWNLOAD_EIGEN=OFF \
-DCMAKE_CXX_STANDARD=17 \
-DCMAKE_CXX_COMPILER=$(which icpc) \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")

- name: Build C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 -j 2 -v

- name: Python tests C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
sudo service apport stop
cmake --build build-17 --target check

- name: C++ tests C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 --target cpptest

- name: Interface test C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 --target test_cmake_build


# Testing on CentOS (manylinux uses a centos base, and this is an easy way
# to get GCC 4.8, which is the manylinux1 compiler).
centos:

@ -397,11 +600,11 @@ jobs:
fail-fast: false
matrix:
centos:
- 7 # GCC 4.8
- 8
- centos7 # GCC 4.8
- stream8

name: "🐍 3 • CentOS ${{ matrix.centos }} • x64"
container: "centos:${{ matrix.centos }}"
container: "quay.io/centos/centos:${{ matrix.centos }}"

steps:
- uses: actions/checkout@v2

@ -413,12 +616,14 @@ jobs:
run: python3 -m pip install --upgrade pip

- name: Install dependencies
run: python3 -m pip install cmake -r tests/requirements.txt --prefer-binary
run: |
python3 -m pip install cmake -r tests/requirements.txt

- name: Configure
shell: bash
run: >
cmake -S . -B build
-DCMAKE_BUILD_TYPE=MinSizeRel
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON

@ -476,7 +681,7 @@ jobs:
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
working-directory: /build-tests

- name: Run tests
- name: Python tests
run: make pytest -j 2
working-directory: /build-tests

@ -493,16 +698,13 @@ jobs:
- uses: actions/setup-python@v2

- name: Install Doxygen
run: sudo apt install -y doxygen

- name: Install docs & setup requirements
run: python3 -m pip install -r docs/requirements.txt
run: sudo apt-get install -y doxygen librsvg2-bin # Changed to rsvg-convert in 20.04

- name: Build docs
run: python3 -m sphinx -W -b html docs docs/.build
run: pipx run nox -s docs

- name: Make SDist
run: python3 setup.py sdist
run: pipx run nox -s build -- --sdist

- run: git status --ignored

@ -514,6 +716,250 @@ jobs:
- name: Compare Dists (headers only)
working-directory: include
run: |
python3 -m pip install --user -U ../dist/*
python3 -m pip install --user -U ../dist/*.tar.gz
installed=$(python3 -c "import pybind11; print(pybind11.get_include() + '/pybind11')")
diff -rq $installed ./pybind11

win32:
strategy:
fail-fast: false
matrix:
python:
- 3.5
- 3.6
- 3.7
- 3.8
- 3.9
- pypy-3.6

include:
- python: 3.9
args: -DCMAKE_CXX_STANDARD=20 -DDOWNLOAD_EIGEN=OFF
- python: 3.8
args: -DCMAKE_CXX_STANDARD=17

name: "🐍 ${{ matrix.python }} • MSVC 2019 • x86 ${{ matrix.args }}"
runs-on: windows-latest

steps:
- uses: actions/checkout@v2

- name: Setup Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
architecture: x86

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Prepare MSVC
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
arch: x86

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

# First build - C++11 mode and inplace
- name: Configure ${{ matrix.args }}
run: >
cmake -S . -B build
-G "Visual Studio 16 2019" -A Win32
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
${{ matrix.args }}
- name: Build C++11
run: cmake --build build -j 2

- name: Python tests
run: cmake --build build -t pytest

win32-msvc2015:
name: "🐍 ${{ matrix.python }} • MSVC 2015 • x64"
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
python:
- 2.7
- 3.6
- 3.7
# todo: check/cpptest does not support 3.8+ yet

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Prepare MSVC
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
toolset: 14.0

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

# First build - C++11 mode and inplace
- name: Configure
run: >
cmake -S . -B build
-G "Visual Studio 14 2015" -A x64
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON

- name: Build C++14
run: cmake --build build -j 2

- name: Run all checks
run: cmake --build build -t check


win32-msvc2017:
name: "🐍 ${{ matrix.python }} • MSVC 2017 • x64"
runs-on: windows-2016
strategy:
fail-fast: false
matrix:
python:
- 2.7
- 3.5
- 3.7
std:
- 14

include:
- python: 2.7
std: 17
args: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
- python: 3.7
std: 17
args: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

# First build - C++11 mode and inplace
- name: Configure
run: >
cmake -S . -B build
-G "Visual Studio 15 2017" -A x64
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
${{ matrix.args }}

- name: Build ${{ matrix.std }}
run: cmake --build build -j 2

- name: Run all checks
run: cmake --build build -t check

mingw:
name: "🐍 3 • windows-latest • ${{ matrix.sys }}"
runs-on: windows-latest
defaults:
run:
shell: msys2 {0}
strategy:
fail-fast: false
matrix:
include:
- { sys: mingw64, env: x86_64 }
- { sys: mingw32, env: i686 }
steps:
- uses: msys2/setup-msys2@v2
with:
msystem: ${{matrix.sys}}
install: >-
git
mingw-w64-${{matrix.env}}-gcc
mingw-w64-${{matrix.env}}-python-pip
mingw-w64-${{matrix.env}}-python-numpy
mingw-w64-${{matrix.env}}-python-scipy
mingw-w64-${{matrix.env}}-cmake
mingw-w64-${{matrix.env}}-make
mingw-w64-${{matrix.env}}-python-pytest
mingw-w64-${{matrix.env}}-eigen3
mingw-w64-${{matrix.env}}-boost
mingw-w64-${{matrix.env}}-catch

- uses: actions/checkout@v2

- name: Configure C++11
# LTO leads to many undefined reference like
# `pybind11::detail::function_call::function_call(pybind11::detail::function_call&&)
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=11 -S . -B build

- name: Build C++11
run: cmake --build build -j 2

- name: Python tests C++11
run: cmake --build build --target pytest -j 2

- name: C++11 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build --target cpptest -j 2

- name: Interface test C++11
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build --target test_cmake_build

- name: Clean directory
run: git clean -fdx

- name: Configure C++14
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=14 -S . -B build2

- name: Build C++14
run: cmake --build build2 -j 2

- name: Python tests C++14
run: cmake --build build2 --target pytest -j 2

- name: C++14 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build2 --target cpptest -j 2

- name: Interface test C++14
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build2 --target test_cmake_build

- name: Clean directory
run: git clean -fdx

- name: Configure C++17
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=17 -S . -B build3

- name: Build C++17
run: cmake --build build3 -j 2

- name: Python tests C++17
run: cmake --build build3 --target pytest -j 2

- name: C++17 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build3 --target cpptest -j 2

- name: Interface test C++17
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build3 --target test_cmake_build

@ -18,7 +18,7 @@ jobs:
matrix:
runs-on: [ubuntu-latest, macos-latest, windows-latest]
arch: [x64]
cmake: [3.18]
cmake: ["3.21"]

include:
- runs-on: ubuntu-latest

@ -55,7 +55,7 @@ jobs:
# An action for adding a specific version of CMake:
# https://github.com/jwlawson/actions-setup-cmake
- name: Setup CMake ${{ matrix.cmake }}
uses: jwlawson/actions-setup-cmake@v1.3
uses: jwlawson/actions-setup-cmake@v1.12
with:
cmake-version: ${{ matrix.cmake }}

@ -82,57 +82,3 @@ jobs:
working-directory: build dir
if: github.event_name == 'workflow_dispatch'
run: cmake --build . --config Release --target check

# This builds the sdists and wheels and makes sure the files are exactly as
# expected. Using Windows and Python 2.7, since that is often the most
# challenging matrix element.
test-packaging:
name: 🐍 2.7 • 📦 tests • windows-latest
runs-on: windows-latest

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 2.7
uses: actions/setup-python@v2
with:
python-version: 2.7

- name: Prepare env
run: python -m pip install -r tests/requirements.txt --prefer-binary

- name: Python Packaging tests
run: pytest tests/extra_python_package/


# This runs the packaging tests and also builds and saves the packages as
# artifacts.
packaging:
name: 🐍 3.8 • 📦 & 📦 tests • ubuntu-latest
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8

- name: Prepare env
run: python -m pip install -r tests/requirements.txt build twine --prefer-binary

- name: Python Packaging tests
run: pytest tests/extra_python_package/

- name: Build SDist and wheels
run: |
python -m build -s -w .
PYBIND11_GLOBAL_SDIST=1 python -m build -s -w .

- name: Check metadata
run: twine check dist/*

- uses: actions/upload-artifact@v2
with:
path: dist/*

@ -19,15 +1,17 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- uses: pre-commit/action@v2.0.0
- uses: pre-commit/action@v2.0.3
with:
# Slow hooks are marked with manual - slow is okay here, run them too
extra_args: --hook-stage manual
extra_args: --hook-stage manual --all-files

clang-tidy:
# When making changes here, please also review the "Clang-Tidy" section
# in .github/CONTRIBUTING.md and update as needed.
name: Clang-Tidy
runs-on: ubuntu-latest
container: silkeh/clang:10
container: silkeh/clang:12
steps:
- uses: actions/checkout@v2

@ -35,7 +37,12 @@ jobs:
run: apt-get update && apt-get install -y python3-dev python3-pytest

- name: Configure
run: cmake -S . -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);--warnings-as-errors=*"
run: >
cmake -S . -B build
-DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy)"
-DDOWNLOAD_EIGEN=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=17

- name: Build
run: cmake --build build -j 2
run: cmake --build build -j 2 -- --keep-going

@ -0,0 +1,16 @@
name: Labeler
on:
pull_request_target:
types: [closed]

jobs:
label:
name: Labeler
runs-on: ubuntu-latest
steps:

- uses: actions/labeler@main
if: github.event.pull_request.merged == true
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
configuration-path: .github/labeler_merged.yml

@ -0,0 +1,108 @@
name: Pip

on:
workflow_dispatch:
pull_request:
push:
branches:
- master
- stable
- v*
release:
types:
- published

env:
PIP_ONLY_BINARY: numpy

jobs:
# This builds the sdists and wheels and makes sure the files are exactly as
# expected. Using Windows and Python 2.7, since that is often the most
# challenging matrix element.
test-packaging:
name: 🐍 2.7 • 📦 tests • windows-latest
runs-on: windows-latest

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 2.7
uses: actions/setup-python@v2
with:
python-version: 2.7

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

- name: Python Packaging tests
run: pytest tests/extra_python_package/


# This runs the packaging tests and also builds and saves the packages as
# artifacts.
packaging:
name: 🐍 3.8 • 📦 & 📦 tests • ubuntu-latest
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2

- name: Setup 🐍 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt build twine

- name: Python Packaging tests
run: pytest tests/extra_python_package/

- name: Build SDist and wheels
run: |
python -m build
PYBIND11_GLOBAL_SDIST=1 python -m build

- name: Check metadata
run: twine check dist/*

- name: Save standard package
uses: actions/upload-artifact@v2
with:
name: standard
path: dist/pybind11-*

- name: Save global package
uses: actions/upload-artifact@v2
with:
name: global
path: dist/pybind11_global-*


# When a GitHub release is made, upload the artifacts to PyPI
upload:
name: Upload to PyPI
runs-on: ubuntu-latest
if: github.event_name == 'release' && github.event.action == 'published'
needs: [packaging]

steps:
- uses: actions/setup-python@v2

# Downloads all to directories matching the artifact names
- uses: actions/download-artifact@v2

- name: Publish standard package
uses: pypa/gh-action-pypi-publish@v1.5.0
with:
password: ${{ secrets.pypi_password }}
packages_dir: standard/

- name: Publish global package
uses: pypa/gh-action-pypi-publish@v1.5.0
with:
password: ${{ secrets.pypi_password_global }}
packages_dir: global/

@ -0,0 +1,112 @@

name: Upstream

on:
workflow_dispatch:
pull_request:

concurrency:
group: upstream-${{ github.ref }}
cancel-in-progress: true

env:
PIP_ONLY_BINARY: numpy

jobs:
standard:
name: "🐍 3.11 dev • ubuntu-latest • x64"
runs-on: ubuntu-latest
if: "contains(github.event.pull_request.labels.*.name, 'python dev')"

steps:
- uses: actions/checkout@v2

- name: Setup Python 3.11
uses: actions/setup-python@v2
with:
python-version: "3.11-dev"

- name: Setup Boost (Linux)
if: runner.os == 'Linux'
run: sudo apt-get install libboost-dev

- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12

- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt

- name: Setup annotations on Linux
if: runner.os == 'Linux'
run: python -m pip install pytest-github-actions-annotate-failures

# First build - C++11 mode and inplace
- name: Configure C++11
run: >
cmake -S . -B .
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=11

- name: Build C++11
run: cmake --build . -j 2

- name: Python tests C++11
run: cmake --build . --target pytest -j 2

- name: C++11 tests
run: cmake --build . --target cpptest -j 2

- name: Interface test C++11
run: cmake --build . --target test_cmake_build

- name: Clean directory
run: git clean -fdx

# Second build - C++17 mode and in a build directory
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
${{ matrix.args }}
${{ matrix.args2 }}

- name: Build
run: cmake --build build2 -j 2

- name: Python tests
run: cmake --build build2 --target pytest

- name: C++ tests
run: cmake --build build2 --target cpptest

# Third build - C++17 mode with unstable ABI
- name: Configure (unstable ABI)
run: >
cmake -S . -B build3
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
"-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
${{ matrix.args }}

- name: Build (unstable ABI)
run: cmake --build build3 -j 2

- name: Python tests (unstable ABI)
run: cmake --build build3 --target pytest

- name: Interface test
run: cmake --build build2 --target test_cmake_build

# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
run: pytest tests/extra_setuptools