Merge branch 'develop' of github.com:borglab/gtsam into ta-add-methods

release/4.3a0
Akshay Krishnan 2022-05-05 21:49:51 -07:00
commit 2e8d8ddf88
262 changed files with 17607 additions and 5938 deletions

.gitignore vendored
View File

@ -17,3 +17,4 @@
# for QtCreator:
CMakeLists.txt.user*
xcode/
/Dockerfile

View File

@ -20,6 +20,8 @@
#pragma once
#include <gtsam/config.h> // Configuration from CMake
#include <Eigen/Dense>
#include <stdexcept>
#include <string>
#ifndef OPTIONALJACOBIAN_NOBOOST
#include <boost/optional.hpp>
@ -96,6 +98,24 @@ public:
usurp(dynamic->data());
}
/**
* @brief Constructor from an Eigen::Ref *value*. Will not usurp if dimension is wrong
* @note This is important so we don't overwrite someone else's memory!
*/
template<class MATRIX>
OptionalJacobian(Eigen::Ref<MATRIX> dynamic_ref) :
map_(nullptr) {
if (dynamic_ref.rows() == Rows && dynamic_ref.cols() == Cols && !dynamic_ref.IsRowMajor) {
usurp(dynamic_ref.data());
} else {
throw std::invalid_argument(
std::string("OptionalJacobian called with wrong dimensions or "
"storage order.\n"
"Expected: ") +
"(" + std::to_string(Rows) + ", " + std::to_string(Cols) + ")");
}
}
#ifndef OPTIONALJACOBIAN_NOBOOST
/// Constructor with boost::none just makes empty
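For reviewers, a minimal usage sketch of the new Eigen::Ref constructor (the function f below is hypothetical, not part of this PR):

#include <gtsam/base/OptionalJacobian.h>
#include <Eigen/Dense>

using gtsam::OptionalJacobian;

// Hypothetical function with an optional 2x3 Jacobian output.
void f(OptionalJacobian<2, 3> H = boost::none) {
  if (H) *H = Eigen::Matrix<double, 2, 3>::Ones();
}

void example() {
  Eigen::MatrixXd J(2, 3);
  f(Eigen::Ref<Eigen::MatrixXd>(J));     // sizes match: J is written through the map
  Eigen::MatrixXd K(3, 3);
  // f(Eigen::Ref<Eigen::MatrixXd>(K));  // wrong size: would throw std::invalid_argument
}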

View File

@ -160,7 +160,7 @@ namespace gtsam {
const typename Base::LabelFormatter& labelFormatter =
&DefaultFormatter) const {
auto valueFormatter = [](const double& v) {
return (boost::format("%4.4g") % v).str();
return (boost::format("%4.8g") % v).str();
};
Base::print(s, labelFormatter, valueFormatter);
}

View File

@ -59,33 +59,41 @@ namespace gtsam {
/** constant stored in this leaf */
Y constant_;
/** Constructor from constant */
Leaf(const Y& constant) :
constant_(constant) {}
/** The number of assignments contained within this leaf.
* Particularly useful when leaves have been pruned.
*/
size_t nrAssignments_;
/** return the constant */
/// Constructor from constant
Leaf(const Y& constant, size_t nrAssignments = 1)
: constant_(constant), nrAssignments_(nrAssignments) {}
/// Return the constant
const Y& constant() const {
return constant_;
}
/// Return the number of assignments contained within this leaf.
size_t nrAssignments() const { return nrAssignments_; }
/// Leaf-Leaf equality
bool sameLeaf(const Leaf& q) const override {
return constant_ == q.constant_;
}
/// polymorphic equality: is q is a leaf, could be
/// polymorphic equality: is q a leaf and is it the same as this leaf?
bool sameLeaf(const Node& q) const override {
return (q.isLeaf() && q.sameLeaf(*this));
}
/** equality up to tolerance */
/// equality up to tolerance
bool equals(const Node& q, const CompareFunc& compare) const override {
const Leaf* other = dynamic_cast<const Leaf*>(&q);
if (!other) return false;
return compare(this->constant_, other->constant_);
}
/** print */
/// print
void print(const std::string& s, const LabelFormatter& labelFormatter,
const ValueFormatter& valueFormatter) const override {
std::cout << s << " Leaf " << valueFormatter(constant_) << std::endl;
@ -108,14 +116,14 @@ namespace gtsam {
/** apply unary operator */
NodePtr apply(const Unary& op) const override {
NodePtr f(new Leaf(op(constant_)));
NodePtr f(new Leaf(op(constant_), nrAssignments_));
return f;
}
/// Apply unary operator with assignment
NodePtr apply(const UnaryAssignment& op,
const Assignment<L>& choices) const override {
NodePtr f(new Leaf(op(choices, constant_)));
const Assignment<L>& assignment) const override {
NodePtr f(new Leaf(op(assignment, constant_), nrAssignments_));
return f;
}
@ -130,7 +138,8 @@ namespace gtsam {
// Applying binary operator to two leaves results in a leaf
NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
NodePtr h(new Leaf(op(fL.constant_, constant_))); // fL op gL
// fL op gL
NodePtr h(new Leaf(op(fL.constant_, constant_), nrAssignments_));
return h;
}
@ -141,7 +150,7 @@ namespace gtsam {
/** choose a branch, create new memory ! */
NodePtr choose(const L& label, size_t index) const override {
return NodePtr(new Leaf(constant()));
return NodePtr(new Leaf(constant(), nrAssignments()));
}
bool isLeaf() const override { return true; }
@ -159,7 +168,10 @@ namespace gtsam {
std::vector<NodePtr> branches_;
private:
/** incremental allSame */
/**
* Incremental allSame.
* Records if all the branches are the same leaf.
*/
size_t allSame_;
using ChoicePtr = boost::shared_ptr<const Choice>;
@ -172,15 +184,22 @@ namespace gtsam {
#endif
}
/** If all branches of a choice node f are the same, just return a branch */
/// If all branches of a choice node f are the same, just return a branch.
static NodePtr Unique(const ChoicePtr& f) {
#ifndef DT_NO_PRUNING
#ifndef GTSAM_DT_NO_PRUNING
if (f->allSame_) {
assert(f->branches().size() > 0);
NodePtr f0 = f->branches_[0];
assert(f0->isLeaf());
size_t nrAssignments = 0;
for(auto branch: f->branches()) {
assert(branch->isLeaf());
nrAssignments +=
boost::dynamic_pointer_cast<const Leaf>(branch)->nrAssignments();
}
NodePtr newLeaf(
new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant()));
new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant(),
nrAssignments));
return newLeaf;
} else
#endif
@ -189,15 +208,13 @@ namespace gtsam {
bool isLeaf() const override { return false; }
/** Constructor, given choice label and mandatory expected branch count */
/// Constructor, given choice label and mandatory expected branch count.
Choice(const L& label, size_t count) :
label_(label), allSame_(true) {
branches_.reserve(count);
}
/**
* Construct from applying binary op to two Choice nodes
*/
/// Construct from applying binary op to two Choice nodes.
Choice(const Choice& f, const Choice& g, const Binary& op) :
allSame_(true) {
// Choose what to do based on label
@ -225,6 +242,7 @@ namespace gtsam {
}
}
/// Return the label of this choice node.
const L& label() const {
return label_;
}
@ -246,7 +264,7 @@ namespace gtsam {
branches_.push_back(node);
}
/** print (as a tree) */
/// print (as a tree).
void print(const std::string& s, const LabelFormatter& labelFormatter,
const ValueFormatter& valueFormatter) const override {
std::cout << s << " Choice(";
@ -292,7 +310,7 @@ namespace gtsam {
return (q.isLeaf() && q.sameLeaf(*this));
}
/** equality */
/// equality
bool equals(const Node& q, const CompareFunc& compare) const override {
const Choice* other = dynamic_cast<const Choice*>(&q);
if (!other) return false;
@ -305,7 +323,7 @@ namespace gtsam {
return true;
}
/** evaluate */
/// evaluate
const Y& operator()(const Assignment<L>& x) const override {
#ifndef NDEBUG
typename Assignment<L>::const_iterator it = x.find(label_);
@ -320,13 +338,13 @@ namespace gtsam {
return (*child)(x);
}
/**
* Construct from applying unary op to a Choice node
*/
/// Construct from applying unary op to a Choice node.
Choice(const L& label, const Choice& f, const Unary& op) :
label_(label), allSame_(true) {
branches_.reserve(f.branches_.size()); // reserve space
for (const NodePtr& branch : f.branches_) push_back(branch->apply(op));
for (const NodePtr& branch : f.branches_) {
push_back(branch->apply(op));
}
}
/**
@ -337,28 +355,28 @@ namespace gtsam {
* @param f The original choice node to apply the op on.
* @param op Function to apply on the choice node. Takes Assignment and
* value as arguments.
* @param choices The Assignment that will go to op.
* @param assignment The Assignment that will go to op.
*/
Choice(const L& label, const Choice& f, const UnaryAssignment& op,
const Assignment<L>& choices)
const Assignment<L>& assignment)
: label_(label), allSame_(true) {
branches_.reserve(f.branches_.size()); // reserve space
Assignment<L> choices_ = choices;
Assignment<L> assignment_ = assignment;
for (size_t i = 0; i < f.branches_.size(); i++) {
choices_[label_] = i; // Set assignment for label to i
assignment_[label_] = i; // Set assignment for label to i
const NodePtr branch = f.branches_[i];
push_back(branch->apply(op, choices_));
push_back(branch->apply(op, assignment_));
// Remove the choice so we are backtracking
auto choice_it = choices_.find(label_);
choices_.erase(choice_it);
// Remove the assignment so we are backtracking
auto assignment_it = assignment_.find(label_);
assignment_.erase(assignment_it);
}
}
/** apply unary operator */
/// apply unary operator.
NodePtr apply(const Unary& op) const override {
auto r = boost::make_shared<Choice>(label_, *this, op);
return Unique(r);
@ -366,8 +384,8 @@ namespace gtsam {
/// Apply unary operator with assignment
NodePtr apply(const UnaryAssignment& op,
const Assignment<L>& choices) const override {
auto r = boost::make_shared<Choice>(label_, *this, op, choices);
const Assignment<L>& assignment) const override {
auto r = boost::make_shared<Choice>(label_, *this, op, assignment);
return Unique(r);
}
@ -640,7 +658,7 @@ namespace gtsam {
// If leaf, apply unary conversion "op" and create a unique leaf.
using MXLeaf = typename DecisionTree<M, X>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const MXLeaf>(f)) {
return NodePtr(new Leaf(Y_of_X(leaf->constant())));
return NodePtr(new Leaf(Y_of_X(leaf->constant()), leaf->nrAssignments()));
}
// Check if Choice
@ -662,7 +680,16 @@ namespace gtsam {
}
/****************************************************************************/
// Functor performing depth-first visit without Assignment<L> argument.
/**
* Functor performing depth-first visit to each leaf with the leaf value as
* the argument.
*
* NOTE: We differentiate between leaves and assignments. Concretely, a tree
* over 3 binary variables has 2^3=8 assignments, but after pruning it can
* have fewer than 8 leaves. For example, if all assignments have value 1,
* pruning collapses the tree to a single leaf that still represents 8
* assignments.
*/
template <typename L, typename Y>
struct Visit {
using F = std::function<void(const Y&)>;
@ -691,33 +718,74 @@ namespace gtsam {
}
/****************************************************************************/
// Functor performing depth-first visit with Assignment<L> argument.
/**
* Functor performing depth-first visit to each leaf with the Leaf object
* passed as an argument.
*
* NOTE: We differentiate between leaves and assignments. Concretely, a tree
* over 3 binary variables has 2^3=8 assignments, but after pruning it can
* have fewer than 8 leaves. For example, if all assignments have value 1,
* pruning collapses the tree to a single leaf that still represents 8
* assignments.
*/
template <typename L, typename Y>
struct VisitLeaf {
using F = std::function<void(const typename DecisionTree<L, Y>::Leaf&)>;
explicit VisitLeaf(F f) : f(f) {} ///< Construct from folding function.
F f; ///< folding function object.
/// Do a depth-first visit on the tree rooted at node.
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) const {
using Leaf = typename DecisionTree<L, Y>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
return f(*leaf);
using Choice = typename DecisionTree<L, Y>::Choice;
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
if (!choice)
throw std::invalid_argument("DecisionTree::VisitLeaf: Invalid NodePtr");
for (auto&& branch : choice->branches()) (*this)(branch); // recurse!
}
};
template <typename L, typename Y>
template <typename Func>
void DecisionTree<L, Y>::visitLeaf(Func f) const {
VisitLeaf<L, Y> visit(f);
visit(root_);
}
/****************************************************************************/
/**
* Functor performing depth-first visit to each leaf with the leaf's
* `Assignment<L>` and value passed as arguments.
*
* NOTE: Follows the same pruning semantics as `visit`.
*/
template <typename L, typename Y>
struct VisitWith {
using Choices = Assignment<L>;
using F = std::function<void(const Choices&, const Y&)>;
using F = std::function<void(const Assignment<L>&, const Y&)>;
explicit VisitWith(F f) : f(f) {} ///< Construct from folding function.
Choices choices; ///< Assignment, mutating through recursion.
F f; ///< folding function object.
Assignment<L> assignment; ///< Assignment, mutating through recursion.
F f; ///< folding function object.
/// Do a depth-first visit on the tree rooted at node.
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) {
using Leaf = typename DecisionTree<L, Y>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
return f(choices, leaf->constant());
return f(assignment, leaf->constant());
using Choice = typename DecisionTree<L, Y>::Choice;
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
if (!choice)
throw std::invalid_argument("DecisionTree::VisitWith: Invalid NodePtr");
for (size_t i = 0; i < choice->nrChoices(); i++) {
choices[choice->label()] = i; // Set assignment for label to i
assignment[choice->label()] = i; // Set assignment for label to i
(*this)(choice->branches()[i]); // recurse!
// Remove the choice so we are backtracking
auto choice_it = choices.find(choice->label());
choices.erase(choice_it);
auto choice_it = assignment.find(choice->label());
assignment.erase(choice_it);
}
}
};
@ -747,12 +815,26 @@ namespace gtsam {
}
/****************************************************************************/
// labels is just done with a visit
/**
* Get (partial) labels by performing a visit.
*
* This method performs a depth-first search to go to every leaf and records
* the key assignment that leads to that leaf. Since the tree can be pruned,
* there might be a leaf at a lower depth which results in a partial
* assignment (i.e. not all keys are specified).
*
* E.g. given a tree with 3 keys, there may be a branch where the 3rd key has
* the same value for all its leaves. This leads to the branch being pruned,
* so we get a leaf that is reached via only the first 2 keys and their
* assignments.
*/
template <typename L, typename Y>
std::set<L> DecisionTree<L, Y>::labels() const {
std::set<L> unique;
auto f = [&](const Assignment<L>& choices, const Y&) {
for (auto&& kv : choices) unique.insert(kv.first);
auto f = [&](const Assignment<L>& assignment, const Y&) {
for (auto&& kv : assignment) {
unique.insert(kv.first);
}
};
visitWith(f);
return unique;
@ -801,8 +883,8 @@ namespace gtsam {
throw std::runtime_error(
"DecisionTree::apply(unary op) undefined for empty tree.");
}
Assignment<L> choices;
return DecisionTree(root_->apply(op, choices));
Assignment<L> assignment;
return DecisionTree(root_->apply(op, assignment));
}
/****************************************************************************/
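For context, a rough sketch of how the assignment-aware apply above might be called (assuming the public apply overload taking a UnaryAssignment, whose implementation tail is shown in this hunk):

#include <gtsam/discrete/DecisionTree-inl.h>
#include <string>

using DT = gtsam::DecisionTree<std::string, int>;

void applySketch() {
  DT tree({{"A", 2}, {"B", 2}}, "1 2 3 4");
  // The op also receives the (possibly partial) assignment leading to each leaf.
  DT doubled = tree.apply(
      [](const gtsam::Assignment<std::string>& assignment, const int& value) {
        return 2 * value;
      });
}
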

View File

@ -105,7 +105,7 @@ namespace gtsam {
virtual const Y& operator()(const Assignment<L>& x) const = 0;
virtual Ptr apply(const Unary& op) const = 0;
virtual Ptr apply(const UnaryAssignment& op,
const Assignment<L>& choices) const = 0;
const Assignment<L>& assignment) const = 0;
virtual Ptr apply_f_op_g(const Node&, const Binary&) const = 0;
virtual Ptr apply_g_op_fL(const Leaf&, const Binary&) const = 0;
virtual Ptr apply_g_op_fC(const Choice&, const Binary&) const = 0;
@ -153,7 +153,7 @@ namespace gtsam {
/** Create a constant */
explicit DecisionTree(const Y& y);
/** Create a new leaf function splitting on a variable */
/// Create tree with 2 assignments `y1`, `y2`, splitting on variable `label`
DecisionTree(const L& label, const Y& y1, const Y& y2);
/** Allow Label+Cardinality for convenience */
@ -219,9 +219,8 @@ namespace gtsam {
/// @name Standard Interface
/// @{
/** Make virtual */
virtual ~DecisionTree() {
}
/// Make virtual
virtual ~DecisionTree() {}
/// Check if tree is empty.
bool empty() const { return !root_; }
@ -235,9 +234,11 @@ namespace gtsam {
/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f side-effect taking a value.
* @param f (side-effect) Function taking a value.
*
* @note Due to pruning, leaves might not exhaust choices.
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
@ -250,13 +251,32 @@ namespace gtsam {
/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f side-effect taking an assignment and a value.
* @param f (side-effect) Function taking a leaf node (const Leaf&).
*
* @note Due to pruning, leaves might not exhaust choices.
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
* auto visitor = [&](const Assignment<L>& choices, int y) { sum += y; };
* auto visitor = [&](const Leaf& leaf) { sum += leaf.constant(); };
* tree.visitLeaf(visitor);
*/
template <typename Func>
void visitLeaf(Func f) const;
/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f (side-effect) Function taking an assignment and a value.
*
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
* auto visitor = [&](const Assignment<L>& assignment, int y) { sum += y; };
* tree.visitWith(visitor);
*/
template <typename Func>
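A small sketch of the leaf-vs-assignment semantics described in the notes above (assuming pruning is enabled, i.e. GTSAM_DT_NO_PRUNING is not defined):

#include <gtsam/discrete/DecisionTree-inl.h>
#include <string>

using DT = gtsam::DecisionTree<std::string, int>;

void visitSketch() {
  // 2 binary variables, all values equal: 2^2 = 4 assignments, but the tree
  // collapses to a single leaf.
  DT tree({{"A", 2}, {"B", 2}}, "1 1 1 1");

  size_t nrLeaves = 0;
  tree.visit([&](int /*value*/) { nrLeaves++; });  // nrLeaves == 1

  tree.visitWith([&](const gtsam::Assignment<std::string>& assignment, int value) {
    // `assignment` may be partial (here: empty) when whole subtrees were pruned.
  });
}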

View File

@ -286,5 +286,43 @@ namespace gtsam {
AlgebraicDecisionTree<Key>(keys, table),
cardinalities_(keys.cardinalities()) {}
/* ************************************************************************ */
DecisionTreeFactor DecisionTreeFactor::prune(size_t maxNrAssignments) const {
const size_t N = maxNrAssignments;
// Get the probabilities in the decision tree so we can threshold.
std::vector<double> probabilities;
this->visitLeaf([&](const Leaf& leaf) {
size_t nrAssignments = leaf.nrAssignments();
double prob = leaf.constant();
probabilities.insert(probabilities.end(), nrAssignments, prob);
});
// If the number of leaf probabilities is already at most maxNrAssignments,
// there is nothing to prune.
if (probabilities.size() <= N) {
return *this;
}
std::sort(probabilities.begin(), probabilities.end(),
std::greater<double>{});
double threshold = probabilities[N - 1];
// Now threshold the decision tree
size_t total = 0;
auto thresholdFunc = [threshold, &total, N](const double& value) {
if (value < threshold || total >= N) {
return 0.0;
} else {
total += 1;
return value;
}
};
DecisionTree<Key, double> thresholded(*this, thresholdFunc);
// Create pruned decision tree factor and return.
return DecisionTreeFactor(this->discreteKeys(), thresholded);
}
/* ************************************************************************ */
} // namespace gtsam

View File

@ -170,6 +170,26 @@ namespace gtsam {
/// Return all the discrete keys associated with this factor.
DiscreteKeys discreteKeys() const;
/**
* @brief Prune the decision tree of discrete variables.
*
* Pruning will set the leaves to be "pruned" to 0 indicating a 0
* probability. An assignment is pruned if it is not in the top
* `maxNrAssignments` values.
*
* A violation can occur if there are more duplicate values than
* `maxNrAssignments`. A violation here means we would need to un-prune the
* decision tree (e.g. all assignment values are 1.0). There can also be a
* case where only a subset of values are duplicated (e.g. for a tree with 8
* assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5); this is not a
* violation since for `maxNrAssignments=5` the top values are (1, 0.8).
*
* @param maxNrAssignments The maximum number of assignments to keep.
* @return DecisionTreeFactor
*/
DecisionTreeFactor prune(size_t maxNrAssignments) const;
/// @}
/// @name Wrapper support
/// @{
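A brief usage sketch of the new prune method (values chosen for illustration; compare with the unit test added further down in this diff):

#include <gtsam/discrete/DecisionTreeFactor.h>

using namespace gtsam;

void pruneSketch() {
  DiscreteKey A(1, 2), B(2, 2);
  DecisionTreeFactor f(A & B, "0.1 0.4 0.2 0.3");
  // Keep only the 2 most probable assignments; the rest are set to 0.0,
  // so the result is equivalent to DecisionTreeFactor(A & B, "0 0.4 0 0.3").
  DecisionTreeFactor pruned = f.prune(2);
}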

View File

@ -20,7 +20,7 @@
#include <gtsam/discrete/DiscreteKey.h> // make sure we have traits
#include <gtsam/discrete/DiscreteValues.h>
// headers first to make sure no missing headers
//#define DT_NO_PRUNING
//#define GTSAM_DT_NO_PRUNING
#include <gtsam/discrete/AlgebraicDecisionTree.h>
#include <gtsam/discrete/DecisionTree-inl.h> // for convert only
#define DISABLE_TIMING

View File

@ -18,7 +18,7 @@
*/
// #define DT_DEBUG_MEMORY
// #define DT_NO_PRUNING
// #define GTSAM_DT_NO_PRUNING
#define DISABLE_DOT
#include <gtsam/discrete/DecisionTree-inl.h>
@ -323,6 +323,49 @@ TEST(DecisionTree, Containers) {
StringContainerTree converted(stringIntTree, container_of_int);
}
/* ************************************************************************** */
// Test nrAssignments.
TEST(DecisionTree, NrAssignments) {
pair<string, size_t> A("A", 2), B("B", 2), C("C", 2);
DT tree({A, B, C}, "1 1 1 1 1 1 1 1");
EXPECT(tree.root_->isLeaf());
auto leaf = boost::dynamic_pointer_cast<const DT::Leaf>(tree.root_);
EXPECT_LONGS_EQUAL(8, leaf->nrAssignments());
DT tree2({C, B, A}, "1 1 1 2 3 4 5 5");
/* The tree is
Choice(C)
0 Choice(B)
0 0 Leaf 1
0 1 Choice(A)
0 1 0 Leaf 1
0 1 1 Leaf 2
1 Choice(B)
1 0 Choice(A)
1 0 0 Leaf 3
1 0 1 Leaf 4
1 1 Leaf 5
*/
auto root = boost::dynamic_pointer_cast<const DT::Choice>(tree2.root_);
CHECK(root);
auto choice0 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[0]);
CHECK(choice0);
EXPECT(choice0->branches()[0]->isLeaf());
auto choice00 = boost::dynamic_pointer_cast<const DT::Leaf>(choice0->branches()[0]);
CHECK(choice00);
EXPECT_LONGS_EQUAL(2, choice00->nrAssignments());
auto choice1 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[1]);
CHECK(choice1);
auto choice10 = boost::dynamic_pointer_cast<const DT::Choice>(choice1->branches()[0]);
CHECK(choice10);
auto choice11 = boost::dynamic_pointer_cast<const DT::Leaf>(choice1->branches()[1]);
CHECK(choice11);
EXPECT(choice11->isLeaf());
EXPECT_LONGS_EQUAL(2, choice11->nrAssignments());
}
/* ************************************************************************** */
// Test visit.
TEST(DecisionTree, visit) {

View File

@ -106,6 +106,41 @@ TEST(DecisionTreeFactor, enumerate) {
EXPECT(actual == expected);
}
/* ************************************************************************* */
// Check that pruning of the decision tree works as expected.
TEST(DecisionTreeFactor, Prune) {
DiscreteKey A(1, 2), B(2, 2), C(3, 2);
DecisionTreeFactor f(A & B & C, "1 5 3 7 2 6 4 8");
// Only keep the leaves with the top 5 values.
size_t maxNrAssignments = 5;
auto pruned5 = f.prune(maxNrAssignments);
// Pruned leaves should be 0
DecisionTreeFactor expected(A & B & C, "0 5 0 7 0 6 4 8");
EXPECT(assert_equal(expected, pruned5));
// Check for more extreme pruning where we only keep the top 2 leaves
maxNrAssignments = 2;
auto pruned2 = f.prune(maxNrAssignments);
DecisionTreeFactor expected2(A & B & C, "0 0 0 7 0 0 0 8");
EXPECT(assert_equal(expected2, pruned2));
DiscreteKey D(4, 2);
DecisionTreeFactor factor(
D & C & B & A,
"0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 "
"0.0 0.0 0.99995287 1.0 1.0 1.0 1.0");
DecisionTreeFactor expected3(
D & C & B & A,
"0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 "
"0.999952870000 1.0 1.0 1.0 1.0");
maxNrAssignments = 5;
auto pruned3 = factor.prune(maxNrAssignments);
EXPECT(assert_equal(expected3, pruned3));
}
/* ************************************************************************* */
TEST(DecisionTreeFactor, DotWithNames) {
DiscreteKey A(12, 3), B(5, 2);
@ -194,4 +229,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -113,6 +113,18 @@ list<Point2> circleCircleIntersection(Point2 c1, double r1, Point2 c2,
return circleCircleIntersection(c1, c2, fh);
}
Point2Pair means(const std::vector<Point2Pair> &abPointPairs) {
const size_t n = abPointPairs.size();
if (n == 0) throw std::invalid_argument("Point2::mean input Point2Pair vector is empty");
Point2 aSum(0, 0), bSum(0, 0);
for (const Point2Pair &abPair : abPointPairs) {
aSum += abPair.first;
bSum += abPair.second;
}
const double f = 1.0 / n;
return {aSum * f, bSum * f};
}
/* ************************************************************************* */
ostream &operator<<(ostream &os, const gtsam::Point2Pair &p) {
os << p.first << " <-> " << p.second;

View File

@ -72,6 +72,9 @@ GTSAM_EXPORT boost::optional<Point2> circleCircleIntersection(double R_d, double
*/
GTSAM_EXPORT std::list<Point2> circleCircleIntersection(Point2 c1, Point2 c2, boost::optional<Point2> fh);
/// Calculate the two means of a set of Point2 pairs
GTSAM_EXPORT Point2Pair means(const std::vector<Point2Pair> &abPointPairs);
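A quick sketch of means() on two correspondences:

#include <gtsam/geometry/Point2.h>
#include <vector>

void meansSketch() {
  std::vector<gtsam::Point2Pair> pairs{{gtsam::Point2(0, 0), gtsam::Point2(1, 1)},
                                       {gtsam::Point2(2, 0), gtsam::Point2(3, 1)}};
  // centroids.first = (1, 0), centroids.second = (2, 1)
  gtsam::Point2Pair centroids = gtsam::means(pairs);
}
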
/**
* @brief Intersect 2 circles
* @param c1 center of first circle

View File

@ -309,54 +309,77 @@ double Pose2::range(const Pose2& pose,
}
/* *************************************************************************
* New explanation, from scan.ml
* It finds the angle using a linear method:
* q = Pose2::transformFrom(p) = t + R*p
* Align finds the angle using a linear method:
* a = Pose2::transformFrom(b) = t + R*b
* We need to remove the centroids from the data to find the rotation
* using dp=[dpx;dpy] and q=[dqx;dqy] we have
* |dqx| |c -s| |dpx| |dpx -dpy| |c|
* using db=[dbx;dby] and a=[dax;day] we have
* |dax| |c -s| |dbx| |dbx -dby| |c|
* | | = | | * | | = | | * | | = H_i*cs
* |dqy| |s c| |dpy| |dpy dpx| |s|
* |day| |s c| |dby| |dby dbx| |s|
* where the Hi are the 2*2 matrices. Then we will minimize the criterion
* J = \sum_i norm(q_i - H_i * cs)
* J = \sum_i norm(a_i - H_i * cs)
* Taking the derivative with respect to cs and setting to zero we have
* cs = (\sum_i H_i' * q_i)/(\sum H_i'*H_i)
* cs = (\sum_i H_i' * a_i)/(\sum H_i'*H_i)
* The hessian is diagonal and just divides by a constant, but this
* normalization constant is irrelevant, since we take atan2.
* i.e., cos ~ sum(dpx*dqx + dpy*dqy) and sin ~ sum(-dpy*dqx + dpx*dqy)
* i.e., cos ~ sum(dbx*dax + dby*day) and sin ~ sum(-dby*dax + dbx*day)
* The translation is then found from the centroids
* as they also satisfy cq = t + R*cp, hence t = cq - R*cp
* as they also satisfy ca = t + R*cb, hence t = ca - R*cb
*/
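Restating the comment above in equation form (same quantities, nothing new): with $H_i = \begin{bmatrix} db_{i,x} & -db_{i,y} \\ db_{i,y} & db_{i,x} \end{bmatrix}$, the normal equations give

$$ \begin{bmatrix} c \\ s \end{bmatrix} \propto \sum_i H_i^\top\, da_i, \qquad \theta = \operatorname{atan2}(s, c), \qquad t = c_a - R(\theta)\, c_b. $$
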
boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
size_t n = pairs.size();
if (n<2) return boost::none; // we need at least two pairs
boost::optional<Pose2> Pose2::Align(const Point2Pairs &ab_pairs) {
const size_t n = ab_pairs.size();
if (n < 2) {
return boost::none; // we need at least 2 pairs
}
// calculate centroids
Point2 cp(0,0), cq(0,0);
for(const Point2Pair& pair: pairs) {
cp += pair.first;
cq += pair.second;
Point2 ca(0, 0), cb(0, 0);
for (const Point2Pair& pair : ab_pairs) {
ca += pair.first;
cb += pair.second;
}
double f = 1.0/n;
cp *= f; cq *= f;
const double f = 1.0/n;
ca *= f;
cb *= f;
// calculate cos and sin
double c=0,s=0;
for(const Point2Pair& pair: pairs) {
Point2 dp = pair.first - cp;
Point2 dq = pair.second - cq;
c += dp.x() * dq.x() + dp.y() * dq.y();
s += -dp.y() * dq.x() + dp.x() * dq.y();
double c = 0, s = 0;
for (const Point2Pair& pair : ab_pairs) {
Point2 da = pair.first - ca;
Point2 db = pair.second - cb;
c += db.x() * da.x() + db.y() * da.y();
s += -db.y() * da.x() + db.x() * da.y();
}
// calculate angle and translation
double theta = atan2(s,c);
Rot2 R = Rot2::fromAngle(theta);
Point2 t = cq - R*cp;
const double theta = atan2(s, c);
const Rot2 R = Rot2::fromAngle(theta);
const Point2 t = ca - R*cb;
return Pose2(R, t);
}
boost::optional<Pose2> Pose2::Align(const Matrix& a, const Matrix& b) {
if (a.rows() != 2 || b.rows() != 2 || a.cols() != b.cols()) {
throw std::invalid_argument(
"Pose2:Align expects 2*N matrices of equal shape.");
}
Point2Pairs ab_pairs;
for (Eigen::Index j = 0; j < a.cols(); j++) {
ab_pairs.emplace_back(a.col(j), b.col(j));
}
return Pose2::Align(ab_pairs);
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
boost::optional<Pose2> align(const Point2Pairs& ba_pairs) {
Point2Pairs ab_pairs;
for (const Point2Pair &baPair : ba_pairs) {
ab_pairs.emplace_back(baPair.second, baPair.first);
}
return Pose2::Align(ab_pairs);
}
#endif
/* ************************************************************************* */
} // namespace gtsam

View File

@ -92,6 +92,18 @@ public:
*this = Expmap(v);
}
/**
* Create Pose2 by aligning at least two point pairs.
* A pose aTb is estimated between pairs (a_point, b_point) such that
* a_point = aTb * b_point
* Note this allows for noise on the points but in that case the mapping
* will not be exact.
*/
static boost::optional<Pose2> Align(const Point2Pairs& abPointPairs);
// Version of Pose2::Align that takes 2 matrices.
static boost::optional<Pose2> Align(const Matrix& a, const Matrix& b);
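A usage sketch mirroring the updated unit test (align_1) further down in this diff:

#include <gtsam/geometry/Pose2.h>

void alignSketch() {
  gtsam::Point2Pairs ab_pairs{{gtsam::Point2(10, 10), gtsam::Point2(0, 0)},
                              {gtsam::Point2(30, 20), gtsam::Point2(20, 10)}};
  boost::optional<gtsam::Pose2> aTb = gtsam::Pose2::Align(ab_pairs);
  // *aTb is approximately Pose2(Rot2::fromAngle(0), Point2(10, 10))
}
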
/// @}
/// @name Testable
/// @{
@ -331,12 +343,19 @@ inline Matrix wedge<Pose2>(const Vector& xi) {
return Matrix(Pose2::wedge(xi(0),xi(1),xi(2))).eval();
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
/**
* @deprecated Use static constructor (with reversed pairs!)
* Calculate pose between a vector of 2D point correspondences (p,q)
* where q = Pose2::transformFrom(p) = t + R*p
*/
typedef std::pair<Point2,Point2> Point2Pair;
GTSAM_EXPORT boost::optional<Pose2> align(const std::vector<Point2Pair>& pairs);
GTSAM_EXPORT boost::optional<Pose2>
GTSAM_DEPRECATED align(const Point2Pairs& pairs);
#endif
// Convenience typedef
using Pose2Pair = std::pair<Pose2, Pose2>;
using Pose2Pairs = std::vector<Pose2Pair>;
template <>
struct traits<Pose2> : public internal::LieGroup<Pose2> {};

View File

@ -473,12 +473,13 @@ boost::optional<Pose3> Pose3::Align(const Matrix& a, const Matrix& b) {
"Pose3:Align expects 3*N matrices of equal shape.");
}
Point3Pairs abPointPairs;
for (size_t j=0; j < a.cols(); j++) {
for (Eigen::Index j = 0; j < a.cols(); j++) {
abPointPairs.emplace_back(a.col(j), b.col(j));
}
return Pose3::Align(abPointPairs);
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
Point3Pairs abPointPairs;
for (const Point3Pair &baPair : baPointPairs) {
@ -486,6 +487,7 @@ boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
}
return Pose3::Align(abPointPairs);
}
#endif
/* ************************************************************************* */
std::ostream &operator<<(std::ostream &os, const Pose3& pose) {

View File

@ -129,6 +129,19 @@ Rot2 Rot2::relativeBearing(const Point2& d, OptionalJacobian<1, 2> H) {
}
}
/* ************************************************************************* */
Rot2 Rot2::ClosestTo(const Matrix2& M) {
Eigen::JacobiSVD<Matrix2> svd(M, Eigen::ComputeFullU | Eigen::ComputeFullV);
const Matrix2& U = svd.matrixU();
const Matrix2& V = svd.matrixV();
const double det = (U * V.transpose()).determinant();
Matrix2 M_prime = (U * Vector2(1, det).asDiagonal() * V.transpose());
double c = M_prime(0, 0);
double s = M_prime(1, 0);
return Rot2::fromCosSin(c, s);
}
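A quick sketch of the new named constructor:

#include <gtsam/geometry/Rot2.h>

void closestToSketch() {
  gtsam::Matrix2 M;
  M << 0.98, -0.15,
       0.17,  1.02;                           // roughly a rotation, but not orthonormal
  gtsam::Rot2 R = gtsam::Rot2::ClosestTo(M);  // nearest rotation in the Frobenius sense
}
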
/* ************************************************************************* */
} // gtsam

View File

@ -14,6 +14,7 @@
* @brief 2D rotation
* @date Dec 9, 2009
* @author Frank Dellaert
* @author John Lambert
*/
#pragma once
@ -209,6 +210,9 @@ namespace gtsam {
/** return 2*2 transpose (inverse) rotation matrix */
Matrix2 transpose() const;
/** Find closest valid rotation matrix, given a 2x2 matrix */
static Rot2 ClosestTo(const Matrix2& M);
private:
/** Serialization function */
friend class boost::serialization::access;

View File

@ -0,0 +1,242 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Similarity2.cpp
* @brief Implementation of Similarity2 transform
* @author John Lambert, Varun Agrawal
*/
#include <gtsam/base/Manifold.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Similarity2.h>
#include <gtsam/slam/KarcherMeanFactor-inl.h>
namespace gtsam {
using std::vector;
namespace internal {
/// Subtract centroids from point pairs.
static Point2Pairs SubtractCentroids(const Point2Pairs& abPointPairs,
const Point2Pair& centroids) {
Point2Pairs d_abPointPairs;
for (const Point2Pair& abPair : abPointPairs) {
Point2 da = abPair.first - centroids.first;
Point2 db = abPair.second - centroids.second;
d_abPointPairs.emplace_back(da, db);
}
return d_abPointPairs;
}
/// Form inner products x and y and calculate scale.
static double CalculateScale(const Point2Pairs& d_abPointPairs,
const Rot2& aRb) {
double x = 0, y = 0;
Point2 da, db;
for (const Point2Pair& d_abPair : d_abPointPairs) {
std::tie(da, db) = d_abPair;
const Vector2 da_prime = aRb * db;
y += da.transpose() * da_prime;
x += da_prime.transpose() * da_prime;
}
const double s = y / x;
return s;
}
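For reference, the quantities x and y above give the closed-form least-squares scale (a restatement of the code, with $da_i$, $db_i$ the centroid-subtracted pairs):

$$ s = \arg\min_{\sigma} \sum_i \bigl\lVert da_i - \sigma\,(aRb\, db_i)\bigr\rVert^2 = \frac{\sum_i da_i^{\top} (aRb\, db_i)}{\sum_i \lVert aRb\, db_i\rVert^{2}} = \frac{y}{x}. $$
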
/// Form outer product H.
static Matrix2 CalculateH(const Point2Pairs& d_abPointPairs) {
Matrix2 H = Z_2x2;
for (const Point2Pair& d_abPair : d_abPointPairs) {
H += d_abPair.first * d_abPair.second.transpose();
}
return H;
}
/**
* @brief This method estimates the similarity transform from centroid-subtracted
* (difference) point pairs, given a known or estimated rotation and the point centroids.
*
* @param d_abPointPairs
* @param aRb
* @param centroids
* @return Similarity2
*/
static Similarity2 Align(const Point2Pairs& d_abPointPairs, const Rot2& aRb,
const Point2Pair& centroids) {
const double s = CalculateScale(d_abPointPairs, aRb);
// dividing aTb by s is required because the registration cost function
// minimizes ||a - sRb - t||, whereas Sim(2) computes s(Rb + t)
const Point2 aTb = (centroids.first - s * (aRb * centroids.second)) / s;
return Similarity2(aRb, aTb, s);
}
/**
* @brief This method estimates the similarity transform from point pairs,
* given a known or estimated rotation.
* Refer to:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
* Chapter 3
*
* @param abPointPairs
* @param aRb
* @return Similarity2
*/
static Similarity2 AlignGivenR(const Point2Pairs& abPointPairs,
const Rot2& aRb) {
auto centroids = means(abPointPairs);
auto d_abPointPairs = internal::SubtractCentroids(abPointPairs, centroids);
return internal::Align(d_abPointPairs, aRb, centroids);
}
} // namespace internal
Similarity2::Similarity2() : t_(0, 0), s_(1) {}
Similarity2::Similarity2(double s) : t_(0, 0), s_(s) {}
Similarity2::Similarity2(const Rot2& R, const Point2& t, double s)
: R_(R), t_(t), s_(s) {}
Similarity2::Similarity2(const Matrix2& R, const Vector2& t, double s)
: R_(Rot2::ClosestTo(R)), t_(t), s_(s) {}
Similarity2::Similarity2(const Matrix3& T)
: R_(Rot2::ClosestTo(T.topLeftCorner<2, 2>())),
t_(T.topRightCorner<2, 1>()),
s_(1.0 / T(2, 2)) {}
bool Similarity2::equals(const Similarity2& other, double tol) const {
return R_.equals(other.R_, tol) &&
traits<Point2>::Equals(t_, other.t_, tol) && s_ < (other.s_ + tol) &&
s_ > (other.s_ - tol);
}
bool Similarity2::operator==(const Similarity2& other) const {
return R_.matrix() == other.R_.matrix() && t_ == other.t_ && s_ == other.s_;
}
void Similarity2::print(const std::string& s) const {
std::cout << std::endl;
std::cout << s;
rotation().print("\nR:\n");
std::cout << "t: " << translation().transpose() << " s: " << scale()
<< std::endl;
}
Similarity2 Similarity2::identity() { return Similarity2(); }
Similarity2 Similarity2::operator*(const Similarity2& S) const {
return Similarity2(R_ * S.R_, ((1.0 / S.s_) * t_) + R_ * S.t_, s_ * S.s_);
}
Similarity2 Similarity2::inverse() const {
const Rot2 Rt = R_.inverse();
const Point2 sRt = Rt * (-s_ * t_);
return Similarity2(Rt, sRt, 1.0 / s_);
}
Point2 Similarity2::transformFrom(const Point2& p) const {
const Point2 q = R_ * p + t_;
return s_ * q;
}
Pose2 Similarity2::transformFrom(const Pose2& T) const {
Rot2 R = R_.compose(T.rotation());
Point2 t = Point2(s_ * (R_ * T.translation() + t_));
return Pose2(R, t);
}
Point2 Similarity2::operator*(const Point2& p) const {
return transformFrom(p);
}
Similarity2 Similarity2::Align(const Point2Pairs& abPointPairs) {
// Refer to Chapter 3 of
// http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
if (abPointPairs.size() < 2)
throw std::runtime_error("input should have at least 2 pairs of points");
auto centroids = means(abPointPairs);
auto d_abPointPairs = internal::SubtractCentroids(abPointPairs, centroids);
Matrix2 H = internal::CalculateH(d_abPointPairs);
// ClosestTo finds rotation matrix closest to H in Frobenius sense
Rot2 aRb = Rot2::ClosestTo(H);
return internal::Align(d_abPointPairs, aRb, centroids);
}
Similarity2 Similarity2::Align(const Pose2Pairs& abPosePairs) {
const size_t n = abPosePairs.size();
if (n < 2)
throw std::runtime_error("input should have at least 2 pairs of poses");
// calculate rotation
vector<Rot2> rotations;
Point2Pairs abPointPairs;
rotations.reserve(n);
abPointPairs.reserve(n);
// Below denotes the pose of the i'th object/camera/etc
// in frame "a" or frame "b".
Pose2 aTi, bTi;
for (const Pose2Pair& abPair : abPosePairs) {
std::tie(aTi, bTi) = abPair;
const Rot2 aRb = aTi.rotation().compose(bTi.rotation().inverse());
rotations.emplace_back(aRb);
abPointPairs.emplace_back(aTi.translation(), bTi.translation());
}
const Rot2 aRb_estimate = FindKarcherMean<Rot2>(rotations);
return internal::AlignGivenR(abPointPairs, aRb_estimate);
}
Vector4 Similarity2::Logmap(const Similarity2& S, //
OptionalJacobian<4, 4> Hm) {
const Vector2 u = S.t_;
const Vector1 w = Rot2::Logmap(S.R_);
const double s = log(S.s_);
Vector4 result;
result << u, w, s;
if (Hm) {
throw std::runtime_error("Similarity2::Logmap: derivative not implemented");
}
return result;
}
Similarity2 Similarity2::Expmap(const Vector4& v, //
OptionalJacobian<4, 4> Hm) {
const Vector2 t = v.head<2>();
const Rot2 R = Rot2::Expmap(v.segment<1>(2));
const double s = v[3];
if (Hm) {
throw std::runtime_error("Similarity2::Expmap: derivative not implemented");
}
return Similarity2(R, t, s);
}
Matrix4 Similarity2::AdjointMap() const {
throw std::runtime_error("Similarity2::AdjointMap not implemented");
}
std::ostream& operator<<(std::ostream& os, const Similarity2& p) {
os << "[" << p.rotation().theta() << " " << p.translation().transpose() << " "
<< p.scale() << "]\';";
return os;
}
Matrix3 Similarity2::matrix() const {
Matrix3 T;
T.topRows<2>() << R_.matrix(), t_;
T.bottomRows<1>() << 0, 0, 1.0 / s_;
return T;
}
} // namespace gtsam

View File

@ -0,0 +1,200 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Similarity2.h
* @brief Implementation of Similarity2 transform
* @author John Lambert, Varun Agrawal
*/
#pragma once
#include <gtsam/base/Lie.h>
#include <gtsam/base/Manifold.h>
#include <gtsam/dllexport.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/geometry/Rot2.h>
namespace gtsam {
// Forward declarations
class Pose2;
/**
* 2D similarity transform
*/
class GTSAM_EXPORT Similarity2 : public LieGroup<Similarity2, 4> {
/// @name Pose Concept
/// @{
typedef Rot2 Rotation;
typedef Point2 Translation;
/// @}
private:
Rot2 R_;
Point2 t_;
double s_;
public:
/// @name Constructors
/// @{
/// Default constructor
Similarity2();
/// Construct pure scaling
Similarity2(double s);
/// Construct from GTSAM types
Similarity2(const Rot2& R, const Point2& t, double s);
/// Construct from Eigen types
Similarity2(const Matrix2& R, const Vector2& t, double s);
/// Construct from matrix [R t; 0 s^-1]
Similarity2(const Matrix3& T);
/// @}
/// @name Testable
/// @{
/// Compare with tolerance
bool equals(const Similarity2& sim, double tol) const;
/// Exact equality
bool operator==(const Similarity2& other) const;
/// Print with optional string
void print(const std::string& s) const;
friend std::ostream& operator<<(std::ostream& os, const Similarity2& p);
/// @}
/// @name Group
/// @{
/// Return an identity transform
static Similarity2 identity();
/// Composition
Similarity2 operator*(const Similarity2& S) const;
/// Return the inverse
Similarity2 inverse() const;
/// @}
/// @name Group action on Point2
/// @{
/// Action on a point p is s*(R*p+t)
Point2 transformFrom(const Point2& p) const;
/**
* Action on a pose T.
* |Rs ts| |R t| |Rs*R Rs*t+ts|
* |0 1/s| * |0 1| = | 0 1/s |, the result is still a Sim2 object.
* To retrieve a Pose2, we normalize the scale value to 1.
* |Rs*R Rs*t+ts| |Rs*R s(Rs*t+ts)|
* | 0 1/s | = | 0 1 |
*
* This group action satisfies the compatibility condition.
* For more details, refer to: https://en.wikipedia.org/wiki/Group_action
*/
Pose2 transformFrom(const Pose2& T) const;
/* syntactic sugar for transformFrom */
Point2 operator*(const Point2& p) const;
/**
* Create Similarity2 by aligning at least two point pairs
*/
static Similarity2 Align(const Point2Pairs& abPointPairs);
/**
* Create the Similarity2 object that aligns at least two pose pairs.
* Each pair is of the form (aTi, bTi).
* Given a list of pairs in frame a, and a list of pairs in frame b, Align()
* will compute the best-fit Similarity2 aSb transformation to align them.
* First, the rotation aRb will be computed as the average (Karcher mean) of
* many estimates aRb (from each pair). Afterwards, the scale factor will be
* computed using the algorithm described here:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
*/
static Similarity2 Align(const std::vector<Pose2Pair>& abPosePairs);
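A small usage sketch (a pure scaling by 2 plus a translation; values chosen for illustration):

#include <gtsam/geometry/Similarity2.h>

void sim2AlignSketch() {
  gtsam::Point2Pairs ab_pairs{{gtsam::Point2(1, 1), gtsam::Point2(0, 0)},
                              {gtsam::Point2(3, 1), gtsam::Point2(1, 0)}};
  gtsam::Similarity2 aSb = gtsam::Similarity2::Align(ab_pairs);
  // aSb.scale() ~ 2, and aSb.transformFrom(Point2(0, 0)) ~ Point2(1, 1)
}
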
/// @}
/// @name Lie Group
/// @{
/**
* Log map at the identity
* \f$ [t_x, t_y, \delta, \lambda] \f$
*/
static Vector4 Logmap(const Similarity2& S, //
OptionalJacobian<4, 4> Hm = boost::none);
/// Exponential map at the identity
static Similarity2 Expmap(const Vector4& v, //
OptionalJacobian<4, 4> Hm = boost::none);
/// Chart at the origin
struct ChartAtOrigin {
static Similarity2 Retract(const Vector4& v,
ChartJacobian H = boost::none) {
return Similarity2::Expmap(v, H);
}
static Vector4 Local(const Similarity2& other,
ChartJacobian H = boost::none) {
return Similarity2::Logmap(other, H);
}
};
/// Project from one tangent space to another
Matrix4 AdjointMap() const;
using LieGroup<Similarity2, 4>::inverse;
/// @}
/// @name Standard interface
/// @{
/// Calculate the 3*3 matrix group equivalent
Matrix3 matrix() const;
/// Return a GTSAM rotation
Rot2 rotation() const { return R_; }
/// Return a GTSAM translation
Point2 translation() const { return t_; }
/// Return the scale
double scale() const { return s_; }
/// Dimensionality of tangent space = 4 DOF - used to autodetect sizes
inline static size_t Dim() { return 4; }
/// Dimensionality of tangent space = 4 DOF
inline size_t dim() const { return 4; }
/// @}
};
template <>
struct traits<Similarity2> : public internal::LieGroup<Similarity2> {};
template <>
struct traits<const Similarity2> : public internal::LieGroup<Similarity2> {};
} // namespace gtsam

View File

@ -26,7 +26,7 @@ namespace gtsam {
using std::vector;
namespace {
namespace internal {
/// Subtract centroids from point pairs.
static Point3Pairs subtractCentroids(const Point3Pairs &abPointPairs,
const Point3Pair &centroids) {
@ -81,10 +81,10 @@ static Similarity3 align(const Point3Pairs &d_abPointPairs, const Rot3 &aRb,
static Similarity3 alignGivenR(const Point3Pairs &abPointPairs,
const Rot3 &aRb) {
auto centroids = means(abPointPairs);
auto d_abPointPairs = subtractCentroids(abPointPairs, centroids);
auto d_abPointPairs = internal::subtractCentroids(abPointPairs, centroids);
return align(d_abPointPairs, aRb, centroids);
}
} // namespace
} // namespace internal
Similarity3::Similarity3() :
t_(0,0,0), s_(1) {
@ -165,11 +165,11 @@ Similarity3 Similarity3::Align(const Point3Pairs &abPointPairs) {
if (abPointPairs.size() < 3)
throw std::runtime_error("input should have at least 3 pairs of points");
auto centroids = means(abPointPairs);
auto d_abPointPairs = subtractCentroids(abPointPairs, centroids);
Matrix3 H = calculateH(d_abPointPairs);
auto d_abPointPairs = internal::subtractCentroids(abPointPairs, centroids);
Matrix3 H = internal::calculateH(d_abPointPairs);
// ClosestTo finds rotation matrix closest to H in Frobenius sense
Rot3 aRb = Rot3::ClosestTo(H);
return align(d_abPointPairs, aRb, centroids);
return internal::align(d_abPointPairs, aRb, centroids);
}
Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {
@ -192,7 +192,7 @@ Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {
}
const Rot3 aRb_estimate = FindKarcherMean<Rot3>(rotations);
return alignGivenR(abPointPairs, aRb_estimate);
return internal::alignGivenR(abPointPairs, aRb_estimate);
}
Matrix4 Similarity3::wedge(const Vector7 &xi) {
@ -283,15 +283,11 @@ std::ostream &operator<<(std::ostream &os, const Similarity3& p) {
return os;
}
const Matrix4 Similarity3::matrix() const {
Matrix4 Similarity3::matrix() const {
Matrix4 T;
T.topRows<3>() << R_.matrix(), t_;
T.bottomRows<1>() << 0, 0, 0, 1.0 / s_;
return T;
}
Similarity3::operator Pose3() const {
return Pose3(R_, s_ * t_);
}
} // namespace gtsam

View File

@ -18,13 +18,12 @@
#pragma once
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/Lie.h>
#include <gtsam/base/Manifold.h>
#include <gtsam/dllexport.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Rot3.h>
namespace gtsam {
@ -34,75 +33,73 @@ class Pose3;
/**
* 3D similarity transform
*/
class Similarity3: public LieGroup<Similarity3, 7> {
class GTSAM_EXPORT Similarity3 : public LieGroup<Similarity3, 7> {
/// @name Pose Concept
/// @{
typedef Rot3 Rotation;
typedef Point3 Translation;
/// @}
private:
private:
Rot3 R_;
Point3 t_;
double s_;
public:
public:
/// @name Constructors
/// @{
/// Default constructor
GTSAM_EXPORT Similarity3();
Similarity3();
/// Construct pure scaling
GTSAM_EXPORT Similarity3(double s);
Similarity3(double s);
/// Construct from GTSAM types
GTSAM_EXPORT Similarity3(const Rot3& R, const Point3& t, double s);
Similarity3(const Rot3& R, const Point3& t, double s);
/// Construct from Eigen types
GTSAM_EXPORT Similarity3(const Matrix3& R, const Vector3& t, double s);
Similarity3(const Matrix3& R, const Vector3& t, double s);
/// Construct from matrix [R t; 0 s^-1]
GTSAM_EXPORT Similarity3(const Matrix4& T);
Similarity3(const Matrix4& T);
/// @}
/// @name Testable
/// @{
/// Compare with tolerance
GTSAM_EXPORT bool equals(const Similarity3& sim, double tol) const;
bool equals(const Similarity3& sim, double tol) const;
/// Exact equality
GTSAM_EXPORT bool operator==(const Similarity3& other) const;
bool operator==(const Similarity3& other) const;
/// Print with optional string
GTSAM_EXPORT void print(const std::string& s) const;
void print(const std::string& s) const;
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Similarity3& p);
friend std::ostream& operator<<(std::ostream& os, const Similarity3& p);
/// @}
/// @name Group
/// @{
/// Return an identity transform
GTSAM_EXPORT static Similarity3 identity();
static Similarity3 identity();
/// Composition
GTSAM_EXPORT Similarity3 operator*(const Similarity3& S) const;
Similarity3 operator*(const Similarity3& S) const;
/// Return the inverse
GTSAM_EXPORT Similarity3 inverse() const;
Similarity3 inverse() const;
/// @}
/// @name Group action on Point3
/// @{
/// Action on a point p is s*(R*p+t)
GTSAM_EXPORT Point3 transformFrom(const Point3& p, //
OptionalJacobian<3, 7> H1 = boost::none, //
OptionalJacobian<3, 3> H2 = boost::none) const;
Point3 transformFrom(const Point3& p, //
OptionalJacobian<3, 7> H1 = boost::none, //
OptionalJacobian<3, 3> H2 = boost::none) const;
/**
* Action on a pose T.
@ -115,15 +112,15 @@ public:
* This group action satisfies the compatibility condition.
* For more details, refer to: https://en.wikipedia.org/wiki/Group_action
*/
GTSAM_EXPORT Pose3 transformFrom(const Pose3& T) const;
Pose3 transformFrom(const Pose3& T) const;
/** syntactic sugar for transformFrom */
GTSAM_EXPORT Point3 operator*(const Point3& p) const;
Point3 operator*(const Point3& p) const;
/**
* Create Similarity3 by aligning at least three point pairs
*/
GTSAM_EXPORT static Similarity3 Align(const std::vector<Point3Pair>& abPointPairs);
static Similarity3 Align(const std::vector<Point3Pair>& abPointPairs);
/**
* Create the Similarity3 object that aligns at least two pose pairs.
@ -131,11 +128,11 @@ public:
* Given a list of pairs in frame a, and a list of pairs in frame b, Align()
* will compute the best-fit Similarity3 aSb transformation to align them.
* First, the rotation aRb will be computed as the average (Karcher mean) of
* many estimates aRb (from each pair). Afterwards, the scale factor will be computed
* using the algorithm described here:
* many estimates aRb (from each pair). Afterwards, the scale factor will be
* computed using the algorithm described here:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
*/
GTSAM_EXPORT static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
/// @}
/// @name Lie Group
@ -144,20 +141,22 @@ public:
/** Log map at the identity
* \f$ [R_x,R_y,R_z, t_x, t_y, t_z, \lambda] \f$
*/
GTSAM_EXPORT static Vector7 Logmap(const Similarity3& s, //
OptionalJacobian<7, 7> Hm = boost::none);
static Vector7 Logmap(const Similarity3& s, //
OptionalJacobian<7, 7> Hm = boost::none);
/** Exponential map at the identity
*/
GTSAM_EXPORT static Similarity3 Expmap(const Vector7& v, //
OptionalJacobian<7, 7> Hm = boost::none);
static Similarity3 Expmap(const Vector7& v, //
OptionalJacobian<7, 7> Hm = boost::none);
/// Chart at the origin
struct ChartAtOrigin {
static Similarity3 Retract(const Vector7& v, ChartJacobian H = boost::none) {
static Similarity3 Retract(const Vector7& v,
ChartJacobian H = boost::none) {
return Similarity3::Expmap(v, H);
}
static Vector7 Local(const Similarity3& other, ChartJacobian H = boost::none) {
static Vector7 Local(const Similarity3& other,
ChartJacobian H = boost::none) {
return Similarity3::Logmap(other, H);
}
};
@ -170,67 +169,53 @@ public:
* @return 4*4 element of Lie algebra that can be exponentiated
* TODO(frank): rename to Hat, make part of traits
*/
GTSAM_EXPORT static Matrix4 wedge(const Vector7& xi);
static Matrix4 wedge(const Vector7& xi);
/// Project from one tangent space to another
GTSAM_EXPORT Matrix7 AdjointMap() const;
Matrix7 AdjointMap() const;
/// @}
/// @name Standard interface
/// @{
/// Calculate 4*4 matrix group equivalent
GTSAM_EXPORT const Matrix4 matrix() const;
Matrix4 matrix() const;
/// Return a GTSAM rotation
const Rot3& rotation() const {
return R_;
}
Rot3 rotation() const { return R_; }
/// Return a GTSAM translation
const Point3& translation() const {
return t_;
}
Point3 translation() const { return t_; }
/// Return the scale
double scale() const {
return s_;
}
/// Convert to a rigid body pose (R, s*t)
/// TODO(frank): why is this here? Red flag! Definitely don't have it as a cast.
GTSAM_EXPORT operator Pose3() const;
double scale() const { return s_; }
/// Dimensionality of tangent space = 7 DOF - used to autodetect sizes
inline static size_t Dim() {
return 7;
}
inline static size_t Dim() { return 7; }
/// Dimensionality of tangent space = 7 DOF
inline size_t dim() const {
return 7;
}
inline size_t dim() const { return 7; }
/// @}
/// @name Helper functions
/// @{
private:
private:
/// Calculate expmap and logmap coefficients.
static Matrix3 GetV(Vector3 w, double lambda);
/// @}
};
template<>
template <>
inline Matrix wedge<Similarity3>(const Vector& xi) {
return Similarity3::wedge(xi);
}
template<>
template <>
struct traits<Similarity3> : public internal::LieGroup<Similarity3> {};
template<>
template <>
struct traits<const Similarity3> : public internal::LieGroup<Similarity3> {};
} // namespace gtsam
} // namespace gtsam

View File

@ -372,6 +372,9 @@ class Pose2 {
Pose2(const gtsam::Rot2& r, const gtsam::Point2& t);
Pose2(Vector v);
static boost::optional<gtsam::Pose2> Align(const gtsam::Point2Pairs& abPointPairs);
static boost::optional<gtsam::Pose2> Align(const gtsam::Matrix& a, const gtsam::Matrix& b);
// Testable
void print(string s = "") const;
bool equals(const gtsam::Pose2& pose, double tol) const;
@ -424,8 +427,6 @@ class Pose2 {
void serialize() const;
};
boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);
#include <gtsam/geometry/Pose3.h>
class Pose3 {
// Standard Constructors
@ -583,7 +584,13 @@ class Cal3_S2 {
// Action on Point2
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 calibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
// Standard Interface
double fx() const;
@ -622,7 +629,13 @@ virtual class Cal3DS2_Base {
// Action on Point2
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 calibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
// enabling serialization functionality
void serialize() const;
@ -679,7 +692,13 @@ virtual class Cal3Unified : gtsam::Cal3DS2_Base {
// Note: the signature of this functions differ from the functions
// with equal name in the base class.
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 calibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
// enabling serialization functionality
void serialize() const;
@ -705,7 +724,13 @@ class Cal3Fisheye {
// Action on Point2
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 calibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
// Standard Interface
double fx() const;
@ -768,7 +793,13 @@ class Cal3Bundler {
// Action on Point2
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
gtsam::Point2 calibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
Eigen::Ref<Eigen::MatrixXd> Dcal,
Eigen::Ref<Eigen::MatrixXd> Dp) const;
// Standard Interface
double fx() const;
@ -806,12 +837,25 @@ class CalibratedCamera {
// Action on Point3
gtsam::Point2 project(const gtsam::Point3& point) const;
gtsam::Point2 project(const gtsam::Point3& point,
Eigen::Ref<Eigen::MatrixXd> Dcamera,
Eigen::Ref<Eigen::MatrixXd> Dpoint);
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth);
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
// Standard Interface
gtsam::Pose3 pose() const;
double range(const gtsam::Point3& point) const;
double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
Eigen::Ref<Eigen::MatrixXd> Dpoint);
double range(const gtsam::Pose3& pose) const;
double range(const gtsam::Pose3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
Eigen::Ref<Eigen::MatrixXd> Dpose);
double range(const gtsam::CalibratedCamera& camera) const;
// enabling serialization functionality
@ -823,6 +867,7 @@ template <CALIBRATION>
class PinholeCamera {
// Standard Constructors and Named Constructors
PinholeCamera();
PinholeCamera(const gtsam::PinholeCamera<CALIBRATION> other);
PinholeCamera(const gtsam::Pose3& pose);
PinholeCamera(const gtsam::Pose3& pose, const CALIBRATION& K);
static This Level(const CALIBRATION& K, const gtsam::Pose2& pose,
@ -849,14 +894,50 @@ class PinholeCamera {
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
pair<gtsam::Point2, bool> projectSafe(const gtsam::Point3& pw) const;
gtsam::Point2 project(const gtsam::Point3& point);
gtsam::Point2 project(const gtsam::Point3& point,
Eigen::Ref<Eigen::MatrixXd> Dpose,
Eigen::Ref<Eigen::MatrixXd> Dpoint,
Eigen::Ref<Eigen::MatrixXd> Dcal);
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth,
Eigen::Ref<Eigen::MatrixXd> Dresult_dcal);
double range(const gtsam::Point3& point);
double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
Eigen::Ref<Eigen::MatrixXd> Dpoint);
double range(const gtsam::Pose3& pose);
double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Dcamera,
Eigen::Ref<Eigen::MatrixXd> Dpose);
// enabling serialization functionality
void serialize() const;
};
#include <gtsam/geometry/Similarity2.h>
class Similarity2 {
// Standard Constructors
Similarity2();
Similarity2(double s);
Similarity2(const gtsam::Rot2& R, const gtsam::Point2& t, double s);
Similarity2(const Matrix& R, const Vector& t, double s);
Similarity2(const Matrix& T);
gtsam::Point2 transformFrom(const gtsam::Point2& p) const;
gtsam::Pose2 transformFrom(const gtsam::Pose2& T);
static gtsam::Similarity2 Align(const gtsam::Point2Pairs& abPointPairs);
static gtsam::Similarity2 Align(const gtsam::Pose2Pairs& abPosePairs);
// Standard Interface
bool equals(const gtsam::Similarity2& sim, double tol) const;
Matrix matrix() const;
gtsam::Rot2& rotation();
gtsam::Point2& translation();
double scale() const;
};
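The Similarity2 interface above wraps the new gtsam/geometry/Similarity2.h type. As an illustrative sketch of how the wrapped constructors and the static Align are meant to be used together (only calls declared above are assumed):

```cpp
// Sketch: recover a 2D similarity transform aSb from (a, b) point pairs.
#include <gtsam/geometry/Similarity2.h>
#include <cmath>

using namespace gtsam;

int main() {
  // Ground-truth transform: 90 degree rotation, translation (1, 2), scale 2
  Similarity2 aSb(Rot2::fromAngle(M_PI / 2), Point2(1.0, 2.0), 2.0);

  // Build (a, b) correspondences by mapping b-frame points into the a frame
  Point2Pairs ab_pairs;
  for (const Point2& b : {Point2(0, 0), Point2(1, 0), Point2(0, 1)}) {
    ab_pairs.emplace_back(aSb.transformFrom(b), b);
  }

  // Align recovers the transform from the correspondences
  Similarity2 estimated = Similarity2::Align(ab_pairs);
  const double s = estimated.scale();  // expected to be close to 2.0
  (void)s;
  return 0;
}
```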
#include <gtsam/geometry/Similarity3.h>
class Similarity3 {
// Standard Constructors
@ -873,9 +954,10 @@ class Similarity3 {
static gtsam::Similarity3 Align(const gtsam::Pose3Pairs& abPosePairs);
// Standard Interface
const Matrix matrix() const;
const gtsam::Rot3& rotation();
const gtsam::Point3& translation();
bool equals(const gtsam::Similarity3& sim, double tol) const;
Matrix matrix() const;
gtsam::Rot3& rotation();
gtsam::Point3& translation();
double scale() const;
};
@ -920,9 +1002,18 @@ class StereoCamera {
static size_t Dim();
// Transformations and measurement functions
gtsam::StereoPoint2 project(const gtsam::Point3& point);
gtsam::StereoPoint2 project(const gtsam::Point3& point) const;
gtsam::Point3 backproject(const gtsam::StereoPoint2& p) const;
// project with Jacobian
gtsam::StereoPoint2 project2(const gtsam::Point3& point,
Eigen::Ref<Eigen::MatrixXd> H1,
Eigen::Ref<Eigen::MatrixXd> H2) const;
gtsam::Point3 backproject2(const gtsam::StereoPoint2& p,
Eigen::Ref<Eigen::MatrixXd> H1,
Eigen::Ref<Eigen::MatrixXd> H2) const;
// enabling serialization functionality
void serialize() const;
};

View File

@ -717,81 +717,75 @@ TEST( Pose2, range_pose )
/* ************************************************************************* */
TEST(Pose2, align_1) {
Pose2 expected(Rot2::fromAngle(0), Point2(10,10));
vector<Point2Pair> correspondences;
Point2Pair pq1(make_pair(Point2(0,0), Point2(10,10)));
Point2Pair pq2(make_pair(Point2(20,10), Point2(30,20)));
correspondences += pq1, pq2;
boost::optional<Pose2> actual = align(correspondences);
EXPECT(assert_equal(expected, *actual));
Pose2 expected(Rot2::fromAngle(0), Point2(10, 10));
Point2Pairs ab_pairs {{Point2(10, 10), Point2(0, 0)},
{Point2(30, 20), Point2(20, 10)}};
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
EXPECT(assert_equal(expected, *aTb));
}
TEST(Pose2, align_2) {
Point2 t(20,10);
Point2 t(20, 10);
Rot2 R = Rot2::fromAngle(M_PI/2.0);
Pose2 expected(R, t);
vector<Point2Pair> correspondences;
Point2 p1(0,0), p2(10,0);
Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2);
EXPECT(assert_equal(Point2(20,10),q1));
EXPECT(assert_equal(Point2(20,20),q2));
Point2Pair pq1(make_pair(p1, q1));
Point2Pair pq2(make_pair(p2, q2));
correspondences += pq1, pq2;
Point2 b1(0, 0), b2(10, 0);
Point2Pairs ab_pairs {{expected.transformFrom(b1), b1},
{expected.transformFrom(b2), b2}};
boost::optional<Pose2> actual = align(correspondences);
EXPECT(assert_equal(expected, *actual));
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
EXPECT(assert_equal(expected, *aTb));
}
namespace align_3 {
Point2 t(10,10);
Point2 t(10, 10);
Pose2 expected(Rot2::fromAngle(2*M_PI/3), t);
Point2 p1(0,0), p2(10,0), p3(10,10);
Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2), q3 = expected.transformFrom(p3);
Point2 b1(0, 0), b2(10, 0), b3(10, 10);
Point2 a1 = expected.transformFrom(b1),
a2 = expected.transformFrom(b2),
a3 = expected.transformFrom(b3);
}
TEST(Pose2, align_3) {
using namespace align_3;
vector<Point2Pair> correspondences;
Point2Pair pq1(make_pair(p1, q1));
Point2Pair pq2(make_pair(p2, q2));
Point2Pair pq3(make_pair(p3, q3));
correspondences += pq1, pq2, pq3;
Point2Pairs ab_pairs;
Point2Pair ab1(make_pair(a1, b1));
Point2Pair ab2(make_pair(a2, b2));
Point2Pair ab3(make_pair(a3, b3));
ab_pairs += ab1, ab2, ab3;
boost::optional<Pose2> actual = align(correspondences);
EXPECT(assert_equal(expected, *actual));
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
EXPECT(assert_equal(expected, *aTb));
}
namespace {
/* ************************************************************************* */
// Prototype code to align two triangles using a rigid transform
/* ************************************************************************* */
struct Triangle { size_t i_,j_,k_;};
struct Triangle { size_t i_, j_, k_;};
boost::optional<Pose2> align2(const Point2Vector& ps, const Point2Vector& qs,
boost::optional<Pose2> align2(const Point2Vector& as, const Point2Vector& bs,
const pair<Triangle, Triangle>& trianglePair) {
const Triangle& t1 = trianglePair.first, t2 = trianglePair.second;
vector<Point2Pair> correspondences;
correspondences += make_pair(ps[t1.i_],qs[t2.i_]), make_pair(ps[t1.j_],qs[t2.j_]), make_pair(ps[t1.k_],qs[t2.k_]);
return align(correspondences);
Point2Pairs ab_pairs = {{as[t1.i_], bs[t2.i_]},
{as[t1.j_], bs[t2.j_]},
{as[t1.k_], bs[t2.k_]}};
return Pose2::Align(ab_pairs);
}
}
TEST(Pose2, align_4) {
using namespace align_3;
Point2Vector ps,qs;
ps += p1, p2, p3;
qs += q3, q1, q2; // note in 3,1,2 order !
Point2Vector as, bs;
as += a1, a2, a3;
bs += b3, b1, b2; // note in 3,1,2 order !
Triangle t1; t1.i_=0; t1.j_=1; t1.k_=2;
Triangle t2; t2.i_=1; t2.j_=2; t2.k_=0;
boost::optional<Pose2> actual = align2(ps, qs, make_pair(t1,t2));
boost::optional<Pose2> actual = align2(as, bs, {t1, t2});
EXPECT(assert_equal(expected, *actual));
}
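These tests exercise the new static Pose2::Align(Point2Pairs), which replaces the old free align() helper. A condensed usage sketch, using only calls that appear in the tests above:

```cpp
// Sketch: recover aTb from (a, b) point correspondences with Pose2::Align.
#include <gtsam/geometry/Pose2.h>
#include <boost/optional.hpp>
#include <cmath>

using namespace gtsam;

int main() {
  const Pose2 aTb_true(Rot2::fromAngle(M_PI / 4), Point2(2.0, 1.0));

  Point2Pairs ab_pairs;
  for (const Point2& b : {Point2(0, 0), Point2(1, 0), Point2(0, 1)}) {
    ab_pairs.emplace_back(aTb_true.transformFrom(b), b);  // (a, b) ordering
  }

  // Align returns boost::optional<Pose2>; it may be empty for degenerate input
  const boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
  if (aTb) {
    aTb->print("recovered aTb");
  }
  return 0;
}
```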

View File

@ -0,0 +1,66 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testSimilarity2.cpp
* @brief Unit tests for Similarity2 class
* @author Varun Agrawal
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/base/testLie.h>
#include <gtsam/geometry/Similarity2.h>
#include <functional>
using namespace std::placeholders;
using namespace gtsam;
using namespace std;
GTSAM_CONCEPT_TESTABLE_INST(Similarity2)
static const Point2 P(0.2, 0.7);
static const Rot2 R = Rot2::fromAngle(0.3);
static const double s = 4;
const double degree = M_PI / 180;
//******************************************************************************
TEST(Similarity2, Concepts) {
BOOST_CONCEPT_ASSERT((IsGroup<Similarity2>));
BOOST_CONCEPT_ASSERT((IsManifold<Similarity2>));
BOOST_CONCEPT_ASSERT((IsLieGroup<Similarity2>));
}
//******************************************************************************
TEST(Similarity2, Constructors) {
Similarity2 sim2_Construct1;
Similarity2 sim2_Construct2(s);
Similarity2 sim2_Construct3(R, P, s);
Similarity2 sim2_Construct4(R.matrix(), P, s);
}
//******************************************************************************
TEST(Similarity2, Getters) {
Similarity2 sim2_default;
EXPECT(assert_equal(Rot2(), sim2_default.rotation()));
EXPECT(assert_equal(Point2(0, 0), sim2_default.translation()));
EXPECT_DOUBLES_EQUAL(1.0, sim2_default.scale(), 1e-9);
}
//******************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
//******************************************************************************

View File

@ -458,18 +458,18 @@ TEST(Similarity3, Optimization2) {
Values result;
result = LevenbergMarquardtOptimizer(graph, initial).optimize();
//result.print("Optimized Estimate\n");
Pose3 p1, p2, p3, p4, p5;
p1 = Pose3(result.at<Similarity3>(X(1)));
p2 = Pose3(result.at<Similarity3>(X(2)));
p3 = Pose3(result.at<Similarity3>(X(3)));
p4 = Pose3(result.at<Similarity3>(X(4)));
p5 = Pose3(result.at<Similarity3>(X(5)));
Similarity3 p1, p2, p3, p4, p5;
p1 = result.at<Similarity3>(X(1));
p2 = result.at<Similarity3>(X(2));
p3 = result.at<Similarity3>(X(3));
p4 = result.at<Similarity3>(X(4));
p5 = result.at<Similarity3>(X(5));
//p1.print("Pose1");
//p2.print("Pose2");
//p3.print("Pose3");
//p4.print("Pose4");
//p5.print("Pose5");
//p1.print("Similarity1");
//p2.print("Similarity2");
//p3.print("Similarity3");
//p4.print("Similarity4");
//p5.print("Similarity5");
Similarity3 expected(0.7);
EXPECT(assert_equal(expected, result.at<Similarity3>(X(5)), 0.4));

View File

@ -15,6 +15,10 @@
#include <gtsam/base/timing.h>
#include <gtsam/base/treeTraversal-inst.h>
#ifdef GTSAM_USE_TBB
#include <mutex>
#endif
namespace gtsam {
/* ************************************************************************* */
@ -120,12 +124,25 @@ struct EliminationData {
size_t myIndexInParent;
FastVector<sharedFactor> childFactors;
boost::shared_ptr<BTNode> bayesTreeNode;
#ifdef GTSAM_USE_TBB
boost::shared_ptr<std::mutex> writeLock;
#endif
EliminationData(EliminationData* _parentData, size_t nChildren) :
parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>()) {
parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>())
#ifdef GTSAM_USE_TBB
, writeLock(boost::make_shared<std::mutex>())
#endif
{
if (parentData) {
#ifdef GTSAM_USE_TBB
parentData->writeLock->lock();
#endif
myIndexInParent = parentData->childFactors.size();
parentData->childFactors.push_back(sharedFactor());
#ifdef GTSAM_USE_TBB
parentData->writeLock->unlock();
#endif
} else {
myIndexInParent = 0;
}
@ -196,8 +213,15 @@ struct EliminationData {
nodesIndex_.insert(std::make_pair(j, myData.bayesTreeNode));
// Store remaining factor in parent's gathered factors
if (!eliminationResult.second->empty())
if (!eliminationResult.second->empty()) {
#ifdef GTSAM_USE_TBB
myData.parentData->writeLock->lock();
#endif
myData.parentData->childFactors[myData.myIndexInParent] = eliminationResult.second;
#ifdef GTSAM_USE_TBB
myData.parentData->writeLock->unlock();
#endif
}
}
};
};

View File

@ -91,13 +91,18 @@ namespace gtsam {
void GaussianConditional::print(const string &s, const KeyFormatter& formatter) const {
cout << s << " p(";
for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) {
cout << (boost::format("%1%")%(formatter(*it))).str() << " ";
cout << (boost::format("%1%") % (formatter(*it))).str()
<< (nrFrontals() > 1 ? " " : "");
}
cout << "|";
for (const_iterator it = beginParents(); it != endParents(); ++it) {
cout << " " << (boost::format("%1%")%(formatter(*it))).str();
if (nrParents()) {
cout << " |";
for (const_iterator it = beginParents(); it != endParents(); ++it) {
cout << " " << (boost::format("%1%") % (formatter(*it))).str();
}
}
cout << ")" << endl;
cout << formatMatrixIndented(" R = ", R()) << endl;
for (const_iterator it = beginParents() ; it != endParents() ; ++it) {
cout << formatMatrixIndented((boost::format(" S[%1%] = ")%(formatter(*it))).str(), getA(it))

View File

@ -109,8 +109,9 @@ namespace gtsam {
/// @{
/** print */
void print(const std::string& = "GaussianConditional",
const KeyFormatter& formatter = DefaultKeyFormatter) const override;
void print(
const std::string& = "GaussianConditional",
const KeyFormatter& formatter = DefaultKeyFormatter) const override;
/** equals function */
bool equals(const GaussianFactor&cg, double tol = 1e-9) const override;

View File

@ -54,23 +54,31 @@ namespace noiseModel {
// clang-format on
namespace mEstimator {
//---------------------------------------------------------------------------------------
/**
* Pure virtual class for all robust error function classes.
*
* It provides the machinery for block vs scalar reweighting strategies, in
* addition to defining the interface of derived classes.
*/
class GTSAM_EXPORT Base {
public:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
/// Strategy for reweighting \sa ReweightScheme
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}
/*
/// Returns the reweight scheme, as explained in ReweightScheme
ReweightScheme reweightScheme() const { return reweight_; }
/**
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
@ -80,16 +88,20 @@ class GTSAM_EXPORT Base {
* error vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*
* This returns \rho(x) in \ref mEstimator
*/
virtual double loss(double distance) const { return 0; };
virtual double loss(double distance) const { return 0; }
/*
/**
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*
* This returns w(x) in \ref mEstimator
*/
virtual double weight(double distance) const = 0;
@ -124,7 +136,15 @@ class GTSAM_EXPORT Base {
}
};
/// Null class should behave as Gaussian
/** "Null" robust loss function, equivalent to a Gaussian pdf noise model, or
* plain least-squares (non-robust).
*
* This model has no additional parameters.
*
* - Loss \rho(x) = 0.5 x²
* - Derivative \phi(x) = x
* - Weight w(x) = \phi(x)/x = 1
*/
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@ -146,7 +166,14 @@ class GTSAM_EXPORT Null : public Base {
}
};
/// Fair implements the "Fair" robust error model (Zhang97ivc)
/** Implementation of the "Fair" robust error model (Zhang97ivc)
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (|x|/c - log(1+|x|/c))
* - Derivative \phi(x) = x/(1+|x|/c)
* - Weight w(x) = \phi(x)/x = 1/(1+|x|/c)
*/
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
@ -160,6 +187,7 @@ class GTSAM_EXPORT Fair : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -171,7 +199,14 @@ class GTSAM_EXPORT Fair : public Base {
}
};
/// Huber implements the "Huber" robust error model (Zhang97ivc)
/** The "Huber" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 x² if |x|<k, 0.5 k² + k(|x|-k) otherwise
* - Derivative \phi(x) = x if |x|<k, k sgn(x) otherwise
* - Weight w(x) = \phi(x)/x = 1 if |x|<k, k/|x| otherwise
*/
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
@ -185,6 +220,7 @@ class GTSAM_EXPORT Huber : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */
@ -196,12 +232,19 @@ class GTSAM_EXPORT Huber : public Base {
}
};
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed
/// by:
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
/// Information Technology, Karlsruhe, Germany.
/// oberlaender@fzi.de
/// Thanks Jan!
/** Implementation of the "Cauchy" robust error model (Lee2013IROS).
* Contributed by:
* Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
* Information Technology, Karlsruhe, Germany.
* oberlaender@fzi.de
* Thanks Jan!
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0.5 k² log(1+x²/k²)
* - Derivative \phi(x) = (k²x)/(x²+k²)
* - Weight w(x) = \phi(x)/x = k²/(x²+k²)
*/
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
@ -215,6 +258,7 @@ class GTSAM_EXPORT Cauchy : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */
@ -223,10 +267,18 @@ class GTSAM_EXPORT Cauchy : public Base {
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
ar &BOOST_SERIALIZATION_NVP(ksquared_);
}
};
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
/** Implementation of the "Tukey" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|<c, c²/6 otherwise
* - Derivative \phi(x) = x(1-x²/c²)² if |x|<c, 0 otherwise
* - Weight w(x) = \phi(x)/x = (1-x²/c²)² if |x|<c, 0 otherwise
*/
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
@ -240,6 +292,7 @@ class GTSAM_EXPORT Tukey : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -251,7 +304,14 @@ class GTSAM_EXPORT Tukey : public Base {
}
};
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
/** Implementation of the "Welsch" robust error model (Zhang97ivc).
*
* This model has a scalar parameter "c".
*
* - Loss \rho(x) = -0.5 c² (exp(-x²/c²) - 1)
* - Derivative \phi(x) = x exp(-x²/c²)
* - Weight w(x) = \phi(x)/x = exp(-x²/c²)
*/
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
@ -265,6 +325,7 @@ class GTSAM_EXPORT Welsch : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
private:
/** Serialization function */
@ -273,15 +334,20 @@ class GTSAM_EXPORT Welsch : public Base {
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
ar &BOOST_SERIALIZATION_NVP(csquared_);
}
};
/// GemanMcClure implements the "Geman-McClure" robust error model
/// (Zhang97ivc).
///
/// Note that Geman-McClure weight function uses the parameter c == 1.0,
/// but here it's allowed to use different values, so we actually have
/// the generalized Geman-McClure from (Agarwal15phd).
/** Implementation of the "Geman-McClure" robust error model (Zhang97ivc).
*
* Note that Geman-McClure weight function uses the parameter c == 1.0,
* but here it's allowed to use different values, so we actually have
* the generalized Geman-McClure from (Agarwal15phd).
*
* - Loss \rho(x) = 0.5 (c²x²)/(c²+x²)
* - Derivative \phi(x) = xc⁴/(c²+x²)²
* - Weight w(x) = \phi(x)/x = c⁴/(c²+x²)²
*/
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
@ -293,6 +359,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
protected:
double c_;
@ -307,11 +374,18 @@ class GTSAM_EXPORT GemanMcClure : public Base {
}
};
/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// Under the special condition of the parameter c == 1.0 and not
/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
/** DCS implements the Dynamic Covariance Scaling robust error model
* from the paper Robust Map Optimization (Agarwal13icra).
*
* Under the special condition of the parameter c == 1.0 and not
* forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
*
* This model has a scalar parameter "c" (with "units" of squared error).
*
* - Loss \rho(x) = (c²x² + cx⁴)/(x²+c)² (for any "x")
* - Derivative \phi(x) = 2c²x/(x²+c)²
* - Weight w(x) = \phi(x)/x = 2c²/(x²+c)² if x²>c, 1 otherwise
*/
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
@ -323,6 +397,7 @@ class GTSAM_EXPORT DCS : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return c_; }
protected:
double c_;
@ -337,12 +412,19 @@ class GTSAM_EXPORT DCS : public Base {
}
};
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
/// width 2*k, centered at the origin. The resulting penalty within the dead
/// zone is always zero, and grows quadratically outside the dead zone. In this
/// sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
/// robust to outliers. This penalty can be used to create barrier functions in
/// a general way.
/** L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
* width 2*k, centered at the origin. The resulting penalty within the dead
* zone is always zero, and grows quadratically outside the dead zone. In this
* sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
* robust to outliers. This penalty can be used to create barrier functions in
* a general way.
*
* This model has a scalar parameter "k".
*
* - Loss \rho(x) = 0 if |x|<k, 0.5(k-|x|)² otherwise
* - Derivative \phi(x) = 0 if |x|<k, (-k+x) if x>k, (k+x) if x<-k
* - Weight w(x) = \phi(x)/x = 0 if |x|<k, (-k+x)/x if x>k, (k+x)/x if x<-k
*/
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
@ -356,6 +438,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
double modelParameter() const { return k_; }
private:
/** Serialization function */

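The blocks above document ρ(x), φ(x), and w(x) for each m-estimator, and the diff adds a modelParameter() accessor to every concrete class. As a consolidated, illustrative sketch of how these classes are typically combined with a Gaussian base model (the noiseModel::Robust wrapper is assumed from the existing API; everything else is declared above):

```cpp
// Sketch: wrap an m-estimator into a robust noise model and query it.
#include <gtsam/linear/NoiseModel.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Base Gaussian model and a Huber estimator with threshold k = 1.345
  auto gaussian = noiseModel::Isotropic::Sigma(3, 0.1);
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);

  // Robust model, e.g. for use when constructing factors
  auto robust = noiseModel::Robust::Create(huber, gaussian);
  (void)robust;  // would be passed to a NoiseModelFactor in real code

  // The new getter exposes the scalar parameter of the estimator
  std::cout << "k = " << huber->modelParameter() << std::endl;

  // loss() and weight() follow the rho(x) and w(x) definitions given above
  std::cout << "rho(2) = " << huber->loss(2.0)
            << ", w(2) = " << huber->weight(2.0) << std::endl;
  return 0;
}
```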
View File

@ -404,13 +404,23 @@ TEST(GaussianConditional, Print) {
const Vector2 b(20, 40);
const double sigma = 3;
std::string s = "GaussianConditional";
GaussianConditional conditional(X(0), b, Matrix2::Identity(),
noiseModel::Isotropic::Sigma(2, sigma));
auto conditional =
// Test printing for no parents.
std::string expected =
"GaussianConditional p(x0)\n"
" R = [ 1 0 ]\n"
" [ 0 1 ]\n"
" d = [ 20 40 ]\n"
"isotropic dim=2 sigma=3\n";
EXPECT(assert_print_equal(expected, conditional, "GaussianConditional"));
auto conditional1 =
GaussianConditional::FromMeanAndStddev(X(0), A1, X(1), b, sigma);
// Test printing for single parent.
std::string expected =
std::string expected1 =
"GaussianConditional p(x0 | x1)\n"
" R = [ 1 0 ]\n"
" [ 0 1 ]\n"
@ -418,7 +428,7 @@ TEST(GaussianConditional, Print) {
" [ -3 -4 ]\n"
" d = [ 20 40 ]\n"
"isotropic dim=2 sigma=3\n";
EXPECT(assert_print_equal(expected, conditional, s));
EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional"));
// Test printing for multiple parents.
auto conditional2 = GaussianConditional::FromMeanAndStddev(X(0), A1, Y(0), A2,
@ -433,7 +443,7 @@ TEST(GaussianConditional, Print) {
" [ -7 -8 ]\n"
" d = [ 20 40 ]\n"
"isotropic dim=2 sigma=3\n";
EXPECT(assert_print_equal(expected2, conditional2, s));
EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional"));
}
/* ************************************************************************* */

View File

@ -90,7 +90,11 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation
/**
* Add a single Gyroscope measurement to the preintegration.
* @param measureOmedga Measured angular velocity (in body frame)
* Measurements are taken to be in the sensor
* frame and conversion to the body frame is handled by `body_P_sensor` in
* `PreintegratedRotationParams` (if provided).
*
* @param measuredOmega Measured angular velocity (as given by the sensor)
* @param deltaT Time step
*/
void integrateMeasurement(const Vector3& measuredOmega, double deltaT);

View File

@ -208,8 +208,11 @@ public:
/**
* Add a single IMU measurement to the preintegration.
* @param measuredAcc Measured acceleration (in body frame, as given by the
* sensor)
* Both accelerometer and gyroscope measurements are taken to be in the sensor
* frame and conversion to the body frame is handled by `body_P_sensor` in
* `PreintegrationParams`.
*
* @param measuredAcc Measured acceleration (as given by the sensor)
* @param measuredOmega Measured angular velocity (as given by the sensor)
* @param dt Time interval between two consecutive IMU measurements
*/
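The reworded comment above makes the frame convention explicit: raw measurements are interpreted in the sensor frame, and body_P_sensor in PreintegrationParams maps them into the body frame. An illustrative sketch of that setup, assuming the standard PreintegrationParams / PreintegratedImuMeasurements interfaces (the mounting pose and sample values here are made up):

```cpp
// Sketch: preintegrate sensor-frame IMU samples with a non-trivial mounting pose.
#include <gtsam/navigation/ImuFactor.h>
#include <cmath>

using namespace gtsam;

int main() {
  // Z-up navigation frame with gravity magnitude 9.81
  auto params = PreintegrationParams::MakeSharedU(9.81);

  // Hypothetical mounting: sensor yawed 90 degrees and offset 10 cm in x.
  // With body_P_sensor set, integrateMeasurement expects raw sensor-frame data.
  params->body_P_sensor = Pose3(Rot3::Yaw(M_PI / 2), Point3(0.1, 0.0, 0.0));

  imuBias::ConstantBias priorBias;  // zero bias for this sketch
  PreintegratedImuMeasurements pim(params, priorBias);

  const double dt = 0.01;                      // 100 Hz
  const Vector3 measuredAcc(0.0, 0.0, 9.81);   // as given by the sensor
  const Vector3 measuredOmega(0.0, 0.0, 0.0);  // as given by the sensor
  for (int i = 0; i < 100; ++i) {
    pim.integrateMeasurement(measuredAcc, measuredOmega, dt);
  }
  return 0;
}
```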

View File

@ -121,7 +121,11 @@ public:
/**
* Add a single IMU measurement to the preintegration.
* @param measuredAcc Measured acceleration (in body frame, as given by the sensor)
* Both accelerometer and gyroscope measurements are taken to be in the sensor
* frame and conversion to the body frame is handled by `body_P_sensor` in
* `PreintegrationParams`.
*
* @param measuredAcc Measured acceleration (as given by the sensor)
* @param measuredOmega Measured angular velocity (as given by the sensor)
* @param dt Time interval between this and the last IMU measurement
*/

View File

@ -27,7 +27,6 @@
#include <GeographicLib/Config.h>
#include <GeographicLib/LocalCartesian.hpp>
using namespace std::placeholders;
using namespace std;
using namespace gtsam;
using namespace GeographicLib;
@ -71,7 +70,7 @@ TEST( GPSFactor, Constructor ) {
EXPECT(assert_equal(Z_3x1,factor.evaluateError(T),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,Pose3>(
Matrix expectedH = numericalDerivative11<Vector, Pose3>(
std::bind(&GPSFactor::evaluateError, &factor, std::placeholders::_1, boost::none), T);
// Use the factor to calculate the derivative
@ -100,7 +99,7 @@ TEST( GPSFactor2, Constructor ) {
EXPECT(assert_equal(Z_3x1,factor.evaluateError(T),1e-5));
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,NavState>(
Matrix expectedH = numericalDerivative11<Vector, NavState>(
std::bind(&GPSFactor2::evaluateError, &factor, std::placeholders::_1, boost::none), T);
// Use the factor to calculate the derivative

View File

@ -26,7 +26,6 @@
#include <GeographicLib/LocalCartesian.hpp>
using namespace std::placeholders;
using namespace std;
using namespace gtsam;
using namespace GeographicLib;
@ -64,7 +63,7 @@ TEST( MagFactor, unrotate ) {
Matrix H;
Point3 expected(22735.5, 314.502, 44202.5);
EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1));
EXPECT( assert_equal(numericalDerivative11<Point3,Rot2> //
EXPECT(assert_equal(numericalDerivative11<Point3, Rot2> //
(std::bind(&MagFactor::unrotate, std::placeholders::_1, nM, none), theta), H, 1e-6));
}
@ -75,27 +74,27 @@ TEST( MagFactor, Factors ) {
// MagFactor
MagFactor f(1, measured, s, dir, bias, model);
EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> //
EXPECT(assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
EXPECT(assert_equal((Matrix)numericalDerivative11<Vector, Rot2> //
(std::bind(&MagFactor::evaluateError, &f, std::placeholders::_1, none), theta), H1, 1e-7));
// MagFactor1
// MagFactor1
MagFactor1 f1(1, measured, s, dir, bias, model);
EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> //
EXPECT(assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
EXPECT(assert_equal(numericalDerivative11<Vector, Rot3> //
(std::bind(&MagFactor1::evaluateError, &f1, std::placeholders::_1, none), nRb), H1, 1e-7));
// MagFactor2
// MagFactor2
MagFactor2 f2(1, 2, measured, nRb, model);
EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
EXPECT(assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
EXPECT(assert_equal(numericalDerivative11<Vector, Point3> //
(std::bind(&MagFactor2::evaluateError, &f2, std::placeholders::_1, bias, none, none), scaled),//
H1, 1e-7));
EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
EXPECT(assert_equal(numericalDerivative11<Vector, Point3> //
(std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),//
H2, 1e-7));
// MagFactor2
// MagFactor3
MagFactor3 f3(1, 2, 3, measured, nRb, model);
EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //

View File

@ -279,10 +279,11 @@ namespace gtsam {
template <typename ValueType>
struct handle {
ValueType operator()(Key j, const Value* const pointer) {
try {
auto ptr = dynamic_cast<const GenericValue<ValueType>*>(pointer);
if (ptr) {
// value returns a const ValueType&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<ValueType>&>(*pointer).value();
} catch (std::bad_cast&) {
return ptr->value();
} else {
throw ValuesIncorrectType(j, typeid(*pointer), typeid(ValueType));
}
}
@ -294,11 +295,12 @@ namespace gtsam {
// Handle dynamic matrices
template <int M, int N>
struct handle_matrix<Eigen::Matrix<double, M, N>, true> {
Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
try {
inline Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
auto ptr = dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>*>(pointer);
if (ptr) {
// value returns a const Matrix&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>&>(*pointer).value();
} catch (std::bad_cast&) {
return ptr->value();
} else {
// If a fixed matrix was stored, we end up here as well.
throw ValuesIncorrectType(j, typeid(*pointer), typeid(Eigen::Matrix<double, M, N>));
}
@ -308,16 +310,18 @@ namespace gtsam {
// Handle fixed matrices
template <int M, int N>
struct handle_matrix<Eigen::Matrix<double, M, N>, false> {
Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
try {
inline Eigen::Matrix<double, M, N> operator()(Key j, const Value* const pointer) {
auto ptr = dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>*>(pointer);
if (ptr) {
// value returns a const MatrixMN&, and the return makes a copy !!!!!
return dynamic_cast<const GenericValue<Eigen::Matrix<double, M, N>>&>(*pointer).value();
} catch (std::bad_cast&) {
return ptr->value();
} else {
Matrix A;
try {
// Check if a dynamic matrix was stored
A = handle_matrix<Eigen::MatrixXd, true>()(j, pointer); // will throw if not....
} catch (const ValuesIncorrectType&) {
// Check if a dynamic matrix was stored
auto ptr = dynamic_cast<const GenericValue<Eigen::MatrixXd>*>(pointer);
if (ptr) {
A = ptr->value();
} else {
// Or a dynamic vector
A = handle_matrix<Eigen::VectorXd, true>()(j, pointer); // will throw if not....
}
@ -364,10 +368,10 @@ namespace gtsam {
if(item != values_.end()) {
// dynamic cast the type and throw exception if incorrect
const Value& value = *item->second;
try {
return dynamic_cast<const GenericValue<ValueType>&>(value).value();
} catch (std::bad_cast &) {
auto ptr = dynamic_cast<const GenericValue<ValueType>*>(item->second);
if (ptr) {
return ptr->value();
} else {
// NOTE(abe): clang warns about potential side effects if done in typeid
const Value* value = item->second;
throw ValuesIncorrectType(j, typeid(*value), typeid(ValueType));

View File

@ -245,6 +245,10 @@ class Values {
void insert(size_t j, const gtsam::ParameterMatrix<14>& X);
void insert(size_t j, const gtsam::ParameterMatrix<15>& X);
template <T = {gtsam::Point2,
gtsam::Point3}>
void insert(size_t j, const T& val);
void update(size_t j, const gtsam::Point2& point2);
void update(size_t j, const gtsam::Point3& point3);
void update(size_t j, const gtsam::Rot2& rot2);

View File

@ -40,8 +40,7 @@ T FindKarcherMeanImpl(const vector<T, ALLOC>& rotations) {
return result.at<T>(kKey);
}
template <class T,
typename = typename std::enable_if< std::is_same<gtsam::Rot3, T>::value >::type >
template <class T>
T FindKarcherMean(const std::vector<T>& rotations) {
return FindKarcherMeanImpl(rotations);
}

View File

@ -797,4 +797,30 @@ virtual class ProjectionFactorPPPC : gtsam::NoiseModelFactor {
typedef gtsam::ProjectionFactorPPPC<gtsam::Pose3, gtsam::Point3, gtsam::Cal3_S2> ProjectionFactorPPPCCal3_S2;
typedef gtsam::ProjectionFactorPPPC<gtsam::Pose3, gtsam::Point3, gtsam::Cal3DS2> ProjectionFactorPPPCCal3DS2;
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>
virtual class ProjectionFactorRollingShutter : gtsam::NoiseModelFactor {
ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K);
ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, gtsam::Pose3& body_P_sensor);
ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, bool throwCheirality,
bool verboseCheirality);
ProjectionFactorRollingShutter(const gtsam::Point2& measured, double alpha, const gtsam::noiseModel::Base* noiseModel,
size_t poseKey_a, size_t poseKey_b, size_t pointKey, const gtsam::Cal3_S2* K, bool throwCheirality,
bool verboseCheirality, gtsam::Pose3& body_P_sensor);
gtsam::Point2 measured() const;
double alpha() const;
gtsam::Cal3_S2* calibration() const;
bool verboseCheirality() const;
bool throwCheirality() const;
// enabling serialization functionality
void serialize() const;
};
} //\namespace gtsam

View File

@ -92,7 +92,7 @@ public:
} catch( CheiralityException& e) {
if (H1) *H1 = Matrix::Zero(2,6);
if (H2) *H2 = Matrix::Zero(2,5);
if (H3) *H2 = Matrix::Zero(2,1);
if (H3) *H3 = Matrix::Zero(2,1);
std::cout << e.what() << ": Landmark "<< DefaultKeyFormatter(this->key2()) <<
" moved behind camera " << DefaultKeyFormatter(this->key1()) << std::endl;
return Vector::Ones(2) * 2.0 * K_->fx();

View File

@ -1,8 +1,18 @@
/*
* testInvDepthFactor.cpp
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testInvDepthFactor3.cpp
* @brief Unit tests inverse depth parametrization
*
* Created on: Apr 13, 2012
* Author: cbeall3
* @author cbeall3
* @author Dominik Van Opdenbosch
* @date Apr 13, 2012
*/
#include <CppUnitLite/TestHarness.h>
@ -12,6 +22,7 @@
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam_unstable/slam/InvDepthFactor3.h>
@ -28,6 +39,11 @@ PinholeCamera<Cal3_S2> level_camera(level_pose, *K);
typedef InvDepthFactor3<Pose3, Vector5, double> InverseDepthFactor;
typedef NonlinearEquality<Pose3> PoseConstraint;
Matrix factorError(const Pose3& pose, const Vector5& point, double invDepth,
const InverseDepthFactor& factor) {
return factor.evaluateError(pose, point, invDepth);
}
/* ************************************************************************* */
TEST( InvDepthFactor, optimize) {
@ -92,6 +108,55 @@ TEST( InvDepthFactor, optimize) {
}
/* ************************************************************************* */
TEST( InvDepthFactor, Jacobian3D ) {
// landmark 5 meters in front of camera (camera center at (0,0,1))
Point3 landmark(5, 0, 1);
// get expected projection using pinhole camera
Point2 expected_uv = level_camera.project(landmark);
// get expected landmark representation using backprojection
double inv_depth;
Vector5 inv_landmark;
InvDepthCamera3<Cal3_S2> inv_camera(level_pose, K);
std::tie(inv_landmark, inv_depth) = inv_camera.backproject(expected_uv, 5);
Vector5 expected_inv_landmark((Vector(5) << 0., 0., 1., 0., 0.).finished());
CHECK(assert_equal(expected_inv_landmark, inv_landmark, 1e-6));
CHECK(assert_equal(inv_depth, 1./5, 1e-6));
Symbol poseKey('x',1);
Symbol pointKey('l',1);
Symbol invDepthKey('d',1);
InverseDepthFactor factor(expected_uv, sigma, poseKey, pointKey, invDepthKey, K);
std::vector<Matrix> actualHs(3);
factor.unwhitenedError({{poseKey, genericValue(level_pose)},
{pointKey, genericValue(inv_landmark)},
{invDepthKey,genericValue(inv_depth)}},
actualHs);
const Matrix& H1Actual = actualHs.at(0);
const Matrix& H2Actual = actualHs.at(1);
const Matrix& H3Actual = actualHs.at(2);
// Use numerical derivatives to verify the Jacobians
Matrix H1Expected, H2Expected, H3Expected;
std::function<Matrix(const Pose3 &, const Vector5 &, const double &)>
func = std::bind(&factorError, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, factor);
H1Expected = numericalDerivative31(func, level_pose, inv_landmark, inv_depth);
H2Expected = numericalDerivative32(func, level_pose, inv_landmark, inv_depth);
H3Expected = numericalDerivative33(func, level_pose, inv_landmark, inv_depth);
// Verify the Jacobians
CHECK(assert_equal(H1Expected, H1Actual, 1e-6))
CHECK(assert_equal(H2Expected, H2Actual, 1e-6))
CHECK(assert_equal(H3Expected, H3Actual, 1e-6))
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */

View File

@ -99,11 +99,15 @@ set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)
copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
"${GTSAM_MODULE_PATH}")
# Hack to get python test files copied every time they are modified
# Hack to get python test and util files copied every time they are modified
file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py")
foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY)
endforeach()
file(GLOB GTSAM_PYTHON_UTIL_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/utils/*.py")
foreach(util_file ${GTSAM_PYTHON_UTIL_FILES})
configure_file(${util_file} "${GTSAM_MODULE_PATH}/utils/${util_file}" COPYONLY)
endforeach()
# Common directory for data/datasets stored with the package.
# This will store the data in the Python site package directly.
@ -162,7 +166,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
# Hack to get python test files copied every time they are modified
file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py")
foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
foreach(test_file ${GTSAM_UNSTABLE_PYTHON_TEST_FILES})
configure_file(${test_file} "${GTSAM_UNSTABLE_MODULE_PATH}/tests/${test_file}" COPYONLY)
endforeach()

View File

@ -8,6 +8,7 @@ For instructions on updating the version of the [wrap library](https://github.co
## Requirements
- CMake >= 3.15
- If you want to build the GTSAM python library for a specific python version (e.g. 3.6),
use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake`; otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),

View File

@ -23,8 +23,8 @@ PYBIND11_MAKE_OPAQUE(
std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2>>);
PYBIND11_MAKE_OPAQUE(gtsam::Point2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Point3Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose3Pairs);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Pose3>);
PYBIND11_MAKE_OPAQUE(
gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>);
PYBIND11_MAKE_OPAQUE(gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>);

View File

@ -16,6 +16,7 @@ py::bind_vector<
m_, "Point2Vector");
py::bind_vector<std::vector<gtsam::Point2Pair>>(m_, "Point2Pairs");
py::bind_vector<std::vector<gtsam::Point3Pair>>(m_, "Point3Pairs");
py::bind_vector<std::vector<gtsam::Pose2Pair>>(m_, "Pose2Pairs");
py::bind_vector<std::vector<gtsam::Pose3Pair>>(m_, "Pose3Pairs");
py::bind_vector<std::vector<gtsam::Pose3>>(m_, "Pose3Vector");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>>(

View File

@ -139,6 +139,17 @@ class TestCal3Unified(GtsamTestCase):
self.gtsamAssertEquals(z, np.zeros(2))
self.gtsamAssertEquals(H @ H.T, 4*np.eye(2))
Dcal = np.zeros((2, 10), order='F')
Dp = np.zeros((2, 2), order='F')
camera.calibrate(img_point, Dcal, Dp)
self.gtsamAssertEquals(Dcal, np.array(
[[ 0., 0., 0., -1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., -1., 0., 0., 0., 0., 0.]]))
self.gtsamAssertEquals(Dp, np.array(
[[ 1., -0.],
[-0., 1.]]))
@unittest.skip("triangulatePoint3 currently seems to require perspective projections.")
def test_triangulation(self):
"""Estimate spatial point from image measurements"""

View File

@ -0,0 +1,46 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
PinholeCamera unit tests.
Author: Fan Jiang
"""
import unittest
from math import pi
import numpy as np
import gtsam
from gtsam.utils.test_case import GtsamTestCase
class TestPinholeCamera(GtsamTestCase):
"""
Tests if we can correctly get the camera Jacobians in Python
"""
def test_jacobian(self):
cam1 = gtsam.PinholeCameraCal3Bundler()
# order is important because Eigen is column major!
Dpose = np.zeros((2, 6), order='F')
Dpoint = np.zeros((2, 3), order='F')
Dcal = np.zeros((2, 3), order='F')
cam1.project(np.array([1, 1, 1]), Dpose, Dpoint, Dcal)
self.gtsamAssertEquals(Dpoint, np.array([[1, 0, -1], [0, 1, -1]]))
self.gtsamAssertEquals(
Dpose,
np.array([
[1., -2., 1., -1., 0., 1.], #
[2., -1., -1., 0., -1., 1.]
]))
self.gtsamAssertEquals(Dcal, np.array([[1., 2., 4.], [1., 2., 4.]]))
if __name__ == "__main__":
unittest.main()

View File

@ -70,27 +70,36 @@ class TestPose2(GtsamTestCase):
O---O
"""
pts_a = [
Point2(3, 1),
Point2(1, 1),
Point2(1, 3),
Point2(3, 3),
]
pts_b = [
Point2(1, -3),
Point2(1, -5),
Point2(-1, -5),
Point2(-1, -3),
]
pts_b = [
Point2(3, 1),
Point2(1, 1),
Point2(1, 3),
Point2(3, 3),
]
# fmt: on
ab_pairs = Point2Pairs(list(zip(pts_a, pts_b)))
bTa = gtsam.align(ab_pairs)
aTb = bTa.inverse()
assert aTb is not None
aTb = Pose2.Align(ab_pairs)
self.assertIsNotNone(aTb)
for pt_a, pt_b in zip(pts_a, pts_b):
pt_a_ = aTb.transformFrom(pt_b)
assert np.allclose(pt_a, pt_a_)
np.testing.assert_allclose(pt_a, pt_a_)
# Matrix version
A = np.array(pts_a).T
B = np.array(pts_b).T
aTb = Pose2.Align(A, B)
self.assertIsNotNone(aTb)
for pt_a, pt_b in zip(pts_a, pts_b):
pt_a_ = aTb.transformFrom(pt_b)
np.testing.assert_allclose(pt_a, pt_a_)
if __name__ == "__main__":

View File

@ -0,0 +1,194 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Sim2 unit tests.
Author: John Lambert
"""
# pylint: disable=no-name-in-module
import unittest
import numpy as np
from gtsam import Pose2, Pose2Pairs, Rot2, Similarity2
from gtsam.utils.test_case import GtsamTestCase
class TestSim2(GtsamTestCase):
"""Test selected Sim2 methods."""
def test_align_poses_along_straight_line(self) -> None:
"""Test Align Pose2Pairs method.
Scenario:
3 object poses
same scale (no gauge ambiguity)
world frame has poses rotated about 180 degrees.
world and egovehicle frame translated by 15 meters w.r.t. each other
"""
R180 = Rot2.fromDegrees(180)
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose2(Rot2(), np.array([5, 0]))
eTo1 = Pose2(Rot2(), np.array([10, 0]))
eTo2 = Pose2(Rot2(), np.array([15, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world "w" frame)
wTo0 = Pose2(R180, np.array([-10, 0]))
wTo1 = Pose2(R180, np.array([-5, 0]))
wTo2 = Pose2(R180, np.array([0, 0]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_along_straight_line_gauge(self):
"""Test if Align Pose3Pairs method can account for gauge ambiguity.
Scenario:
3 object poses
with gauge ambiguity (2x scale)
world frame has poses rotated by 90 degrees.
world and egovehicle frame translated by 11 meters w.r.t. each other
"""
R90 = Rot2.fromDegrees(90)
# Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame)
# Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame
eTo0 = Pose2(Rot2(), np.array([1, 0]))
eTo1 = Pose2(Rot2(), np.array([2, 0]))
eTo2 = Pose2(Rot2(), np.array([4, 0]))
eToi_list = [eTo0, eTo1, eTo2]
# Create destination poses
# (same three objects, but instead living in the world/city "w" frame)
wTo0 = Pose2(R90, np.array([0, 12]))
wTo1 = Pose2(R90, np.array([0, 14]))
wTo2 = Pose2(R90, np.array([0, 18]))
wToi_list = [wTo0, wTo1, wTo2]
we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list)))
# Recover the transformation wSe (i.e. world_S_egovehicle)
wSe = Similarity2.Align(we_pairs)
for wToi, eToi in zip(wToi_list, eToi_list):
self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi))
def test_align_poses_scaled_squares(self):
"""Test if Align Pose2Pairs method can account for gauge ambiguity.
Make sure a big and small square can be aligned.
The u's represent a big square (10x10), and v's represents a small square (4x4).
Scenario:
4 object poses
with gauge ambiguity (2.5x scale)
"""
# 0, 90, 180, and 270 degrees yaw
R0 = Rot2.fromDegrees(0)
R90 = Rot2.fromDegrees(90)
R180 = Rot2.fromDegrees(180)
R270 = Rot2.fromDegrees(270)
aTi0 = Pose2(R0, np.array([2, 3]))
aTi1 = Pose2(R90, np.array([12, 3]))
aTi2 = Pose2(R180, np.array([12, 13]))
aTi3 = Pose2(R270, np.array([2, 13]))
aTi_list = [aTi0, aTi1, aTi2, aTi3]
bTi0 = Pose2(R0, np.array([4, 3]))
bTi1 = Pose2(R90, np.array([8, 3]))
bTi2 = Pose2(R180, np.array([8, 7]))
bTi3 = Pose2(R270, np.array([4, 7]))
bTi_list = [bTi0, bTi1, bTi2, bTi3]
ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list)))
# Recover the transformation aSb (big-square frame w.r.t. small-square frame)
aSb = Similarity2.Align(ab_pairs)
for aTi, bTi in zip(aTi_list, bTi_list):
self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi))
def test_constructor(self) -> None:
"""Sim(2) to perform p_b = bSa * p_a"""
bRa = Rot2()
bta = np.array([1, 2])
bsa = 3.0
bSa = Similarity2(R=bRa, t=bta, s=bsa)
self.assertIsInstance(bSa, Similarity2)
np.testing.assert_allclose(bSa.rotation().matrix(), bRa.matrix())
np.testing.assert_allclose(bSa.translation(), bta)
np.testing.assert_allclose(bSa.scale(), bsa)
def test_is_eq(self) -> None:
"""Ensure object equality works properly (are equal)."""
bSa = Similarity2(R=Rot2(), t=np.array([1, 2]), s=3.0)
bSa_ = Similarity2(R=Rot2(), t=np.array([1.0, 2.0]), s=3)
self.gtsamAssertEquals(bSa, bSa_)
def test_not_eq_translation(self) -> None:
"""Ensure object equality works properly (not equal translation)."""
bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
bSa_ = Similarity2(R=Rot2(), t=np.array([1.0, 2.0]), s=3)
self.assertNotEqual(bSa, bSa_)
def test_not_eq_rotation(self) -> None:
"""Ensure object equality works properly (not equal rotation)."""
bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
bSa_ = Similarity2(R=Rot2.fromDegrees(180), t=np.array([2.0, 1.0]), s=3)
self.assertNotEqual(bSa, bSa_)
def test_not_eq_scale(self) -> None:
"""Ensure object equality works properly (not equal scale)."""
bSa = Similarity2(R=Rot2(), t=np.array([2, 1]), s=3.0)
bSa_ = Similarity2(R=Rot2(), t=np.array([2.0, 1.0]), s=1.0)
self.assertNotEqual(bSa, bSa_)
def test_rotation(self) -> None:
"""Ensure rotation component is returned properly."""
R = Rot2.fromDegrees(90)
t = np.array([1, 2])
bSa = Similarity2(R=R, t=t, s=3.0)
# evaluates to [[0, -1], [1, 0]]
expected_R = Rot2.fromDegrees(90)
np.testing.assert_allclose(expected_R.matrix(), bSa.rotation().matrix())
def test_translation(self) -> None:
"""Ensure translation component is returned properly."""
R = Rot2.fromDegrees(90)
t = np.array([1, 2])
bSa = Similarity2(R=R, t=t, s=3.0)
expected_t = np.array([1, 2])
np.testing.assert_allclose(expected_t, bSa.translation())
def test_scale(self) -> None:
"""Ensure the scale factor is returned properly."""
bRa = Rot2()
bta = np.array([1, 2])
bsa = 3.0
bSa = Similarity2(R=bRa, t=bta, s=bsa)
self.assertEqual(bSa.scale(), 3.0)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,59 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
ProjectionFactorRollingShutter unit tests.
Author: Yotam Stern
"""
import unittest
import numpy as np
import gtsam
import gtsam_unstable
from gtsam.utils.test_case import GtsamTestCase
pose1 = gtsam.Pose3()
pose2 = gtsam.Pose3(np.array([[ 0.9999375 , 0.00502487, 0.00998725, 0.1 ],
[-0.00497488, 0.999975 , -0.00502487, 0.02 ],
[-0.01001225, 0.00497488, 0.9999375 , 1. ],
[ 0. , 0. , 0. , 1. ]]))
point = np.array([2, 0, 15])
point_noise = gtsam.noiseModel.Diagonal.Sigmas(np.ones(2))
cal = gtsam.Cal3_S2()
body_p_sensor = gtsam.Pose3()
alpha = 0.1
measured = np.array([0.13257015, 0.0004157])
class TestProjectionFactorRollingShutter(GtsamTestCase):
def test_constructor(self):
'''
test constructor for the ProjectionFactorRollingShutter
'''
factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal)
factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal,
body_p_sensor)
factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal, True, False)
factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal, True, False,
body_p_sensor)
def test_error(self):
'''
test the factor error for a specific example
'''
values = gtsam.Values()
values.insert(0, pose1)
values.insert(1, pose2)
values.insert(2, point)
factor = gtsam_unstable.ProjectionFactorRollingShutter(measured, alpha, point_noise, 0, 1, 2, cal)
self.gtsamAssertEquals(factor.error(values), np.array(0), tol=1e-9)
if __name__ == '__main__':
unittest.main()

View File

@ -19,7 +19,7 @@ install:
if ($env:PLATFORM -eq "x64") { $env:PYTHON = "$env:PYTHON-x64" }
$env:PATH = "C:\Python$env:PYTHON\;C:\Python$env:PYTHON\Scripts\;$env:PATH"
python -W ignore -m pip install --upgrade pip wheel
python -W ignore -m pip install pytest numpy --no-warn-script-location
python -W ignore -m pip install pytest numpy --no-warn-script-location pytest-timeout
- ps: |
Start-FileDownload 'https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.zip'
7z x eigen-3.3.7.zip -y > $null

View File

@ -0,0 +1,19 @@
---
# See all possible options and defaults with:
# clang-format --style=llvm --dump-config
BasedOnStyle: LLVM
AccessModifierOffset: -4
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BreakBeforeBinaryOperators: All
BreakConstructorInitializers: BeforeColon
ColumnLimit: 99
IndentCaseLabels: true
IndentPPDirectives: AfterHash
IndentWidth: 4
Language: Cpp
SpaceAfterCStyleCast: true
Standard: Cpp11
TabWidth: 4
...

View File

@ -1,13 +1,66 @@
FormatStyle: file
Checks: '
*bugprone*,
cppcoreguidelines-init-variables,
cppcoreguidelines-slicing,
clang-analyzer-optin.cplusplus.VirtualCall,
google-explicit-constructor,
llvm-namespace-comment,
modernize-use-override,
readability-container-size-empty,
modernize-use-using,
modernize-use-equals-default,
misc-misplaced-const,
misc-non-copyable-objects,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-uniqueptr-reset-release,
misc-unused-parameters,
modernize-avoid-bind,
modernize-make-shared,
modernize-redundant-void-arg,
modernize-replace-auto-ptr,
modernize-replace-disallow-copy-and-assign-macro,
modernize-replace-random-shuffle,
modernize-shrink-to-fit,
modernize-use-auto,
modernize-use-bool-literals,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-default-member-init,
modernize-use-noexcept,
modernize-use-emplace,
modernize-use-override,
modernize-use-using,
*performance*,
readability-avoid-const-params-in-decls,
readability-const-return-type,
readability-container-size-empty,
readability-delete-null-pointer,
readability-else-after-return,
readability-implicit-bool-conversion,
readability-make-member-function-const,
readability-misplaced-array-index,
readability-non-const-parameter,
readability-redundant-function-ptr-dereference,
readability-redundant-smartptr-get,
readability-redundant-string-cstr,
readability-simplify-subscript-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
readability-string-compare,
readability-suspicious-call-argument,
readability-uniqueptr-delete-release,
-bugprone-exception-escape,
-bugprone-reserved-identifier,
-bugprone-unused-raii,
'
CheckOptions:
- key: performance-for-range-copy.WarnOnAllAutoCopies
value: true
- key: performance-unnecessary-value-param.AllowedTypes
value: 'exception_ptr$;'
- key: readability-implicit-bool-conversion.AllowPointerConditions
value: true
HeaderFilterRegex: 'pybind11/.*h'
WarningsAsErrors: '*'

9
wrap/pybind11/.github/CODEOWNERS vendored Normal file
View File

@ -0,0 +1,9 @@
*.cmake @henryiii
CMakeLists.txt @henryiii
*.yml @henryiii
*.yaml @henryiii
/tools/ @henryiii
/pybind11/ @henryiii
noxfile.py @henryiii
.clang-format @henryiii
.clang-tidy @henryiii

View File

@ -53,6 +53,33 @@ derivative works thereof, in binary and source code form.
## Development of pybind11
### Quick setup
To set up a quick development environment, use [`nox`](https://nox.thea.codes).
This will allow you to do some common tasks with minimal setup effort, but will
take more time to run and be less flexible than a full development environment.
If you use [`pipx run nox`](https://pipx.pypa.io), you don't even need to
install `nox`. Examples:
```bash
# List all available sessions
nox -l
# Run linters
nox -s lint
# Run tests on Python 3.9
nox -s tests-3.9
# Build and preview docs
nox -s docs -- serve
# Build SDists and wheels
nox -s build
```
### Full setup
To set up an ideal development environment, run the following commands on a
system with CMake 3.14+:
@ -93,7 +120,7 @@ The valid options are:
* `-DPYBIND11_NOPYTHON=ON`: Disable all Python searching (disables tests)
* `-DBUILD_TESTING=ON`: Enable the tests
* `-DDOWNLOAD_CATCH=ON`: Download catch to build the C++ tests
* `-DOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests
* `-DDOWNLOAD_EIGEN=ON`: Download Eigen for the NumPy tests
* `-DPYBIND11_INSTALL=ON/OFF`: Enable the install target (on by default for the
master project)
* `-DUSE_PYTHON_INSTALL_DIR=ON`: Try to install into the python dir
@ -126,13 +153,26 @@ cmake --build build --target check
`--target` can be spelled `-t` in CMake 3.15+. You can also run individual
tests with these targets:
* `pytest`: Python tests only
* `pytest`: Python tests only, using the
[pytest](https://docs.pytest.org/en/stable/) framework
* `cpptest`: C++ tests only
* `test_cmake_build`: Install / subdirectory tests
If you want to build just a subset of tests, use
`-DPYBIND11_TEST_OVERRIDE="test_callbacks.cpp;test_pickling.cpp"`. If this is
empty, all tests will be built.
`-DPYBIND11_TEST_OVERRIDE="test_callbacks;test_pickling"`. If this is
empty, all tests will be built. Tests are specified without an extension if they need both a .py and
.cpp file.
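As a minimal sketch (the build directory name and job count here are illustrative, not from the upstream docs), restricting the build to those two tests and running only the Python test target could look like:
```bash
# Illustrative sketch: build only test_callbacks and test_pickling,
# then run just the Python test target.
cmake -S . -B build-subset -DPYBIND11_TEST_OVERRIDE="test_callbacks;test_pickling"
cmake --build build-subset --target pytest -j 2
```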
You may also pass flags to the `pytest` target by editing `tests/pytest.ini` or
by using the `PYTEST_ADDOPTS` environment variable
(see [`pytest` docs](https://docs.pytest.org/en/2.7.3/customize.html#adding-default-options)). As an example:
```bash
env PYTEST_ADDOPTS="--capture=no --exitfirst" \
cmake --build build --target pytest
# Or using abbreviated flags
env PYTEST_ADDOPTS="-s -x" cmake --build build --target pytest
```
### Formatting
@ -164,16 +204,42 @@ name, pre-commit):
pre-commit install
```
### Clang-Tidy
### Clang-Format
To run Clang tidy, the following recipe should work. Files will be modified in
place, so you can use git to monitor the changes.
As of v2.6.2, pybind11 ships with a [`clang-format`][clang-format]
configuration file at the top level of the repo (the filename is
`.clang-format`). Currently, formatting is NOT applied automatically, but
manually using `clang-format` for newly developed files is highly encouraged.
To check if a file needs formatting:
```bash
docker run --rm -v $PWD:/pybind11 -it silkeh/clang:10
apt-get update && apt-get install python3-dev python3-pytest
cmake -S pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);-fix"
cmake --build build
clang-format -style=file --dry-run some.cpp
```
The output will show things to be fixed, if any. To actually format the file:
```bash
clang-format -style=file -i some.cpp
```
Note that the `-style=file` option searches the parent directories for the
`.clang-format` file, i.e. the commands above can be run in any subdirectory
of the pybind11 repo.
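One way to apply this only to the files you have touched is sketched below; it assumes your branch is based on `master` and that standard `git`, `xargs`, and `clang-format` binaries are on your path:
```bash
# Sketch: format only the C++ sources changed relative to master.
git diff --name-only master -- '*.h' '*.cpp' | xargs clang-format -style=file -i
```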
### Clang-Tidy
[`clang-tidy`][clang-tidy] performs deeper static code analyses and is
more complex to run, compared to `clang-format`, but support for `clang-tidy`
is built into the pybind11 CMake configuration. To run `clang-tidy`, the
following recipe should work. Run the `docker` command from the top-level
directory inside your pybind11 git clone. Files will be modified in place,
so you can use git to monitor the changes.
```bash
docker run --rm -v $PWD:/mounted_pybind11 -it silkeh/clang:12
apt-get update && apt-get install -y python3-dev python3-pytest
cmake -S /mounted_pybind11/ -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);-fix" -DDOWNLOAD_EIGEN=ON -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=17
cmake --build build -j 2 -- --keep-going
```
### Include what you use
@ -186,7 +252,7 @@ cmake -S . -B build-iwyu -DCMAKE_CXX_INCLUDE_WHAT_YOU_USE=$(which include-what-y
cmake --build build
```
The report is sent to stderr; you can pip it into a file if you wish.
The report is sent to stderr; you can pipe it into a file if you wish.
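For example, one way to keep the report for later review (the output file name is illustrative; the build directory follows the configure step above) is:
```bash
# Sketch: include-what-you-use writes its suggestions to stderr,
# so redirect stderr into a file while building.
cmake --build build-iwyu 2> iwyu-report.txt
```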
### Build recipes
@ -313,6 +379,8 @@ if you really want to.
[pre-commit]: https://pre-commit.com
[clang-format]: https://clang.llvm.org/docs/ClangFormat.html
[clang-tidy]: https://clang.llvm.org/extra/clang-tidy/
[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/latest
[issue tracker]: https://github.com/pybind/pybind11/issues
[gitter]: https://gitter.im/pybind/Lobby

View File

@ -1,28 +0,0 @@
---
name: Bug Report
about: File an issue about a bug
title: "[BUG] "
---
Make sure you've completed the following steps before submitting your issue -- thank you!
1. Make sure you've read the [documentation][]. Your issue may be addressed there.
2. Search the [issue tracker][] to verify that this hasn't already been reported. +1 or comment there if it has.
3. Consider asking first in the [Gitter chat room][].
4. Include a self-contained and minimal piece of code that reproduces the problem. If that's not possible, try to make the description as clear as possible.
a. If possible, make a PR with a new, failing test to give us a starting point to work on!
[documentation]: https://pybind11.readthedocs.io
[issue tracker]: https://github.com/pybind/pybind11/issues
[Gitter chat room]: https://gitter.im/pybind/Lobby
*After reading, remove this checklist and the template text in parentheses below.*
## Issue description
(Provide a short description, state the expected behavior and what actually happens.)
## Reproducible example code
(The code should be minimal, have no external dependencies, isolate the function(s) that cause breakage. Submit matched and complete C++ and Python snippets that can be easily compiled and run to diagnose the issue.)

View File

@ -0,0 +1,45 @@
name: Bug Report
description: File an issue about a bug
title: "[BUG]: "
labels: [triage]
body:
- type: markdown
attributes:
value: |
Maintainers will only make a best effort to triage PRs. Please do your best to make the issue as easy to act on as possible, and only open if clearly a problem with pybind11 (ask first if unsure).
- type: checkboxes
id: steps
attributes:
label: Required prerequisites
description: Make sure you've completed the following steps before submitting your issue -- thank you!
options:
- label: Make sure you've read the [documentation](https://pybind11.readthedocs.io). Your issue may be addressed there.
required: true
- label: Search the [issue tracker](https://github.com/pybind/pybind11/issues) and [Discussions](https://github.com/pybind/pybind11/discussions) to verify that this hasn't already been reported. +1 or comment there if it has.
required: true
- label: Consider asking first in the [Gitter chat room](https://gitter.im/pybind/Lobby) or in a [Discussion](https://github.com/pybind/pybind11/discussions/new).
required: false
- type: textarea
id: description
attributes:
label: Problem description
placeholder: >-
Provide a short description, state the expected behavior and what
actually happens. Include relevant information like what version of
pybind11 you are using, what system you are on, and any useful commands
/ output.
validations:
required: true
- type: textarea
id: code
attributes:
label: Reproducible example code
placeholder: >-
The code should be minimal, have no external dependencies, isolate the
function(s) that cause breakage. Submit matched and complete C++ and
Python snippets that can be easily compiled and run to diagnose the
issue. If possible, make a PR with a new, failing test to give us a
starting point to work on!
render: text

View File

@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Ask a question
url: https://github.com/pybind/pybind11/discussions/new
about: Please ask and answer questions here, or propose new ideas.
- name: Gitter room
url: https://gitter.im/pybind/Lobby
about: A room for discussing pybind11 with an active community

View File

@ -1,16 +0,0 @@
---
name: Feature Request
about: File an issue about adding a feature
title: "[FEAT] "
---
Make sure you've completed the following steps before submitting your issue -- thank you!
1. Check if your feature has already been mentioned / rejected / planned in other issues.
2. If those resources didn't help, consider asking in the [Gitter chat room][] to see if this is interesting / useful to a larger audience and possible to implement reasonably,
4. If you have a useful feature that passes the previous items (or not suitable for chat), please fill in the details below.
[Gitter chat room]: https://gitter.im/pybind/Lobby
*After reading, remove this checklist.*

View File

@ -1,21 +0,0 @@
---
name: Question
about: File an issue about unexplained behavior
title: "[QUESTION] "
---
If you have a question, please check the following first:
1. Check if your question has already been answered in the [FAQ][] section.
2. Make sure you've read the [documentation][]. Your issue may be addressed there.
3. If those resources didn't help and you only have a short question (not a bug report), consider asking in the [Gitter chat room][]
4. Search the [issue tracker][], including the closed issues, to see if your question has already been asked/answered. +1 or comment if it has been asked but has no answer.
5. If you have a more complex question which is not answered in the previous items (or not suitable for chat), please fill in the details below.
6. Include a self-contained and minimal piece of code that illustrates your question. If that's not possible, try to make the description as clear as possible.
[FAQ]: http://pybind11.readthedocs.io/en/latest/faq.html
[documentation]: https://pybind11.readthedocs.io
[issue tracker]: https://github.com/pybind/pybind11/issues
[Gitter chat room]: https://gitter.im/pybind/Lobby
*After reading, remove this checklist.*

16
wrap/pybind11/.github/dependabot.yml vendored Normal file
View File

@ -0,0 +1,16 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
ignore:
# Official actions have moving tags like v1
# that are used, so they don't need updates here
- dependency-name: "actions/checkout"
- dependency-name: "actions/setup-python"
- dependency-name: "actions/cache"
- dependency-name: "actions/upload-artifact"
- dependency-name: "actions/download-artifact"
- dependency-name: "actions/labeler"

8
wrap/pybind11/.github/labeler.yml vendored Normal file
View File

@ -0,0 +1,8 @@
docs:
- any:
- 'docs/**/*.rst'
- '!docs/changelog.rst'
- '!docs/upgrade.rst'
ci:
- '.github/workflows/*.yml'

View File

@ -0,0 +1,3 @@
needs changelog:
- all:
- '!docs/changelog.rst'

View File

@ -0,0 +1,19 @@
<!--
Title (above): please place [branch_name] at the beginning if you are targeting a branch other than master. *Do not target stable*.
It is recommended to use conventional commit format, see conventionalcommits.org, but not required.
-->
## Description
<!-- Include relevant issues or PRs here, describe what changed and why -->
## Suggested changelog entry:
<!-- Fill in the below block with the expected RestructuredText entry. Delete if no entry needed;
but do not delete header or rst block if an entry is needed! Will be collected via a script. -->
```rst
```
<!-- If the upgrade guide needs updating, note that here too -->

View File

@ -9,6 +9,13 @@ on:
- stable
- v*
concurrency:
group: test-${{ github.ref }}
cancel-in-progress: true
env:
PIP_ONLY_BINARY: numpy
jobs:
# This is the "main" test suite, which tests a large number of different
# versions of default compilers and Python versions in GitHub Actions.
@ -16,71 +23,42 @@ jobs:
strategy:
fail-fast: false
matrix:
runs-on: [ubuntu-latest, windows-latest, macos-latest]
arch: [x64]
runs-on: [ubuntu-latest, windows-2022, macos-latest]
python:
- 2.7
- 3.5
- 3.8
- pypy2
- pypy3
- '2.7'
- '3.5'
- '3.6'
- '3.9'
- '3.10'
- 'pypy-3.7-v7.3.7'
- 'pypy-3.8-v7.3.7'
# Items in here will either be added to the build matrix (if not
# present), or add new keys to an existing matrix element if all the
# existing keys match.
#
# We support three optional keys: args (both build), args1 (first
# build), and args2 (second build).
# We support an optional key: args, for cmake args
include:
# Just add a key
- runs-on: ubuntu-latest
python: 3.6
arch: x64
python: '3.6'
args: >
-DPYBIND11_FINDPYTHON=ON
- runs-on: windows-2016
python: 3.7
arch: x86
args2: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
-DCMAKE_CXX_FLAGS="-D_=1"
- runs-on: windows-latest
python: 3.6
arch: x64
python: '3.6'
args: >
-DPYBIND11_FINDPYTHON=ON
- runs-on: windows-latest
python: 3.7
arch: x64
- runs-on: ubuntu-latest
python: 3.9-dev
arch: x64
- runs-on: macos-latest
python: 3.9-dev
arch: x64
args: >
-DPYBIND11_FINDPYTHON=ON
python: 'pypy-2.7'
# Inject a couple Windows 2019 runs
- runs-on: windows-2019
python: '3.9'
- runs-on: windows-2019
python: '2.7'
# These items will be removed from the build matrix, keys must match.
exclude:
# Currently 32bit only, and we build 64bit
- runs-on: windows-latest
python: pypy2
arch: x64
- runs-on: windows-latest
python: pypy3
arch: x64
# Currently broken on embed_test
- runs-on: windows-latest
python: 3.8
arch: x64
- runs-on: windows-latest
python: 3.9-dev
arch: x64
name: "🐍 ${{ matrix.python }} • ${{ matrix.runs-on }} • ${{ matrix.arch }} ${{ matrix.args }}"
name: "🐍 ${{ matrix.python }} • ${{ matrix.runs-on }} • x64 ${{ matrix.args }}"
runs-on: ${{ matrix.runs-on }}
continue-on-error: ${{ endsWith(matrix.python, 'dev') }}
steps:
- uses: actions/checkout@v2
@ -89,13 +67,18 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
architecture: ${{ matrix.arch }}
- name: Setup Boost (Windows / Linux latest)
run: echo "::set-env name=BOOST_ROOT::$BOOST_ROOT_1_72_0"
- name: Setup Boost (Linux)
# Can't use boost + define _
if: runner.os == 'Linux' && matrix.python != '3.6'
run: sudo apt-get install libboost-dev
- name: Setup Boost (macOS)
if: runner.os == 'macOS'
run: brew install boost
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.3
uses: jwlawson/actions-setup-cmake@v1.12
- name: Cache wheels
if: runner.os == 'macOS'
@ -106,10 +89,11 @@ jobs:
# for ways to do this more generally
path: ~/Library/Caches/pip
# Look to see if there is a cache hit for the corresponding requirements file
key: ${{ runner.os }}-pip-${{ matrix.python }}-${{ matrix.arch }}-${{ hashFiles('tests/requirements.txt') }}
key: ${{ runner.os }}-pip-${{ matrix.python }}-x64-${{ hashFiles('tests/requirements.txt') }}
- name: Prepare env
run: python -m pip install -r tests/requirements.txt --prefer-binary
run: |
python -m pip install -r tests/requirements.txt
- name: Setup annotations on Linux
if: runner.os == 'Linux'
@ -132,6 +116,8 @@ jobs:
run: cmake --build . --target pytest -j 2
- name: C++11 tests
# TODO: Figure out how to load the DLL on Python 3.8+
if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
run: cmake --build . --target cpptest -j 2
- name: Interface test C++11
@ -141,7 +127,7 @@ jobs:
run: git clean -fdx
# Second build - C++17 mode and in a build directory
- name: Configure ${{ matrix.args2 }}
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON
@ -149,7 +135,6 @@ jobs:
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
${{ matrix.args }}
${{ matrix.args2 }}
- name: Build
run: cmake --build build2 -j 2
@ -158,8 +143,28 @@ jobs:
run: cmake --build build2 --target pytest
- name: C++ tests
# TODO: Figure out how to load the DLL on Python 3.8+
if: "!(runner.os == 'Windows' && (matrix.python == 3.8 || matrix.python == 3.9 || matrix.python == '3.10' || matrix.python == '3.11-dev' || matrix.python == 'pypy-3.8'))"
run: cmake --build build2 --target cpptest
# Third build - C++17 mode with unstable ABI
- name: Configure (unstable ABI)
run: >
cmake -S . -B build3
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
"-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
${{ matrix.args }}
- name: Build (unstable ABI)
run: cmake --build build3 -j 2
- name: Python tests (unstable ABI)
run: cmake --build build3 --target pytest
- name: Interface test
run: cmake --build build2 --target test_cmake_build
@ -167,21 +172,105 @@ jobs:
# MSVC, but for now, this action works:
- name: Prepare compiler environment for Windows 🐍 2.7
if: matrix.python == 2.7 && runner.os == 'Windows'
uses: ilammy/msvc-dev-cmd@v1
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
arch: x64
# This makes two environment variables available in the following step(s)
- name: Set Windows 🐍 2.7 environment variables
if: matrix.python == 2.7 && runner.os == 'Windows'
shell: bash
run: |
echo "::set-env name=DISTUTILS_USE_SDK::1"
echo "::set-env name=MSSdk::1"
echo "DISTUTILS_USE_SDK=1" >> $GITHUB_ENV
echo "MSSdk=1" >> $GITHUB_ENV
# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
run: pytest tests/extra_setuptools
if: "!(matrix.python == '3.5' && matrix.runs-on == 'windows-2022')"
deadsnakes:
strategy:
fail-fast: false
matrix:
include:
# TODO: Fails on 3.10, investigate
- python-version: "3.9"
python-debug: true
valgrind: true
# - python-version: "3.11-dev"
# python-debug: false
name: "🐍 ${{ matrix.python-version }}${{ matrix.python-debug && '-dbg' || '' }} (deadsnakes)${{ matrix.valgrind && ' • Valgrind' || '' }} • x64"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Python ${{ matrix.python-version }} (deadsnakes)
uses: deadsnakes/action@v2.1.1
with:
python-version: ${{ matrix.python-version }}
debug: ${{ matrix.python-debug }}
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Valgrind cache
if: matrix.valgrind
uses: actions/cache@v2
id: cache-valgrind
with:
path: valgrind
key: 3.16.1 # Valgrind version
- name: Compile Valgrind
if: matrix.valgrind && steps.cache-valgrind.outputs.cache-hit != 'true'
run: |
VALGRIND_VERSION=3.16.1
curl https://sourceware.org/pub/valgrind/valgrind-$VALGRIND_VERSION.tar.bz2 -o - | tar xj
mv valgrind-$VALGRIND_VERSION valgrind
cd valgrind
./configure
make -j 2 > /dev/null
- name: Install Valgrind
if: matrix.valgrind
working-directory: valgrind
run: |
sudo make install
sudo apt-get update
sudo apt-get install libc6-dbg # Needed by Valgrind
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
- name: Configure
env:
SETUPTOOLS_USE_DISTUTILS: stdlib
run: >
cmake -S . -B build
-DCMAKE_BUILD_TYPE=Debug
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
- name: Build
run: cmake --build build -j 2
- name: Python tests
run: cmake --build build --target pytest
- name: C++ tests
run: cmake --build build --target cpptest
- name: Run Valgrind on Python tests
if: matrix.valgrind
run: cmake --build build --target memcheck
# Testing on clang using the excellent silkeh clang docker images
@ -194,12 +283,20 @@ jobs:
- 3.6
- 3.7
- 3.9
- 5
- 7
- 9
- dev
std:
- 11
include:
- clang: 5
std: 14
- clang: 10
std: 20
- clang: 10
std: 17
name: "🐍 3 • Clang ${{ matrix.clang }} • x64"
name: "🐍 3 • Clang ${{ matrix.clang }} • C++${{ matrix.std }} • x64"
container: "silkeh/clang:${{ matrix.clang }}"
steps:
@ -214,6 +311,7 @@ jobs:
cmake -S . -B build
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- name: Build
@ -252,50 +350,54 @@ jobs:
run: cmake --build build --target pytest
# Testing CentOS 8 + PGI compilers
centos-nvhpc8:
runs-on: ubuntu-latest
name: "🐍 3 • CentOS8 / PGI 20.7 • x64"
container: centos:8
steps:
- uses: actions/checkout@v2
- name: Add Python 3 and a few requirements
run: yum update -y && yum install -y git python3-devel python3-numpy python3-pytest make environment-modules
- name: Install CMake with pip
run: |
python3 -m pip install --upgrade pip
python3 -m pip install cmake --prefer-binary
- name: Install NVidia HPC SDK
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/nvhpc-20-7-20.7-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/nvhpc-2020-20.7-1.x86_64.rpm
- name: Configure
shell: bash
run: |
source /etc/profile.d/modules.sh
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.7
cmake -S . -B build -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=14 -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- name: Build
run: cmake --build build -j 2 --verbose
- name: Python tests
run: cmake --build build --target pytest
- name: C++ tests
run: cmake --build build --target cpptest
- name: Interface test
run: cmake --build build --target test_cmake_build
# TODO: Internal compiler error - report to NVidia
# # Testing CentOS 8 + PGI compilers
# centos-nvhpc8:
# runs-on: ubuntu-latest
# name: "🐍 3 • CentOS8 / PGI 20.11 • x64"
# container: centos:8
#
# steps:
# - uses: actions/checkout@v2
#
# - name: Add Python 3 and a few requirements
# run: yum update -y && yum install -y git python3-devel python3-numpy python3-pytest make environment-modules
#
# - name: Install CMake with pip
# run: |
# python3 -m pip install --upgrade pip
# python3 -m pip install cmake --prefer-binary
#
# - name: Install NVidia HPC SDK
# run: >
# yum -y install
# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-20-11-20.11-1.x86_64.rpm
# https://developer.download.nvidia.com/hpc-sdk/20.11/nvhpc-2020-20.11-1.x86_64.rpm
#
# - name: Configure
# shell: bash
# run: |
# source /etc/profile.d/modules.sh
# module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.11
# cmake -S . -B build -DDOWNLOAD_CATCH=ON -DCMAKE_CXX_STANDARD=14 -DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
#
# - name: Build
# run: cmake --build build -j 2 --verbose
#
# - name: Python tests
# run: cmake --build build --target pytest
#
# - name: C++ tests
# run: cmake --build build --target cpptest
#
# - name: Interface test
# run: cmake --build build --target test_cmake_build
# Testing on CentOS 7 + PGI compilers, which seems to require more workarounds
centos-nvhpc7:
runs-on: ubuntu-latest
name: "🐍 3 • CentOS7 / PGI 20.7 • x64"
name: "🐍 3 • CentOS7 / PGI 20.9 • x64"
container: centos:7
steps:
@ -305,17 +407,17 @@ jobs:
run: yum update -y && yum install -y epel-release && yum install -y git python3-devel make environment-modules cmake3
- name: Install NVidia HPC SDK
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/nvhpc-20-7-20.7-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/nvhpc-2020-20.7-1.x86_64.rpm
run: yum -y install https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-20-9-20.9-1.x86_64.rpm https://developer.download.nvidia.com/hpc-sdk/20.9/nvhpc-2020-20.9-1.x86_64.rpm
# On CentOS 7, we have to filter a few tests (compiler internal error)
# and allow deeper templete recursion (not needed on CentOS 8 with a newer
# and allow deeper template recursion (not needed on CentOS 8 with a newer
# standard library). On some systems, you may need further workarounds:
# https://github.com/pybind/pybind11/pull/2475
- name: Configure
shell: bash
run: |
source /etc/profile.d/modules.sh
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.7
module load /opt/nvidia/hpc_sdk/modulefiles/nvhpc/20.9
cmake3 -S . -B build -DDOWNLOAD_CATCH=ON \
-DCMAKE_CXX_STANDARD=11 \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") \
@ -340,6 +442,7 @@ jobs:
- name: Interface test
run: cmake3 --build build --target test_cmake_build
# Testing on GCC using the GCC docker images (only recent images supported)
gcc:
runs-on: ubuntu-latest
@ -349,8 +452,13 @@ jobs:
gcc:
- 7
- latest
std:
- 11
include:
- gcc: 10
std: 20
name: "🐍 3 • GCC ${{ matrix.gcc }} • x64"
name: "🐍 3 • GCC ${{ matrix.gcc }} • C++${{ matrix.std }}• x64"
container: "gcc:${{ matrix.gcc }}"
steps:
@ -362,10 +470,8 @@ jobs:
- name: Update pip
run: python3 -m pip install --upgrade pip
- name: Setup CMake 3.18
uses: jwlawson/actions-setup-cmake@v1.3
with:
cmake-version: 3.18
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Configure
shell: bash
@ -373,7 +479,7 @@ jobs:
cmake -S . -B build
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=11
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- name: Build
@ -389,6 +495,103 @@ jobs:
run: cmake --build build --target test_cmake_build
# Testing on ICC using the oneAPI apt repo
icc:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
name: "🐍 3 • ICC latest • x64"
steps:
- uses: actions/checkout@v2
- name: Add apt repo
run: |
sudo apt-get update
sudo apt-get install -y wget build-essential pkg-config cmake ca-certificates gnupg
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB
echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
- name: Add ICC & Python 3
run: sudo apt-get update; sudo apt-get install -y intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic cmake python3-dev python3-numpy python3-pytest python3-pip
- name: Update pip
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
python3 -m pip install --upgrade pip
- name: Install dependencies
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
python3 -m pip install -r tests/requirements.txt
- name: Configure C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake -S . -B build-11 \
-DPYBIND11_WERROR=ON \
-DDOWNLOAD_CATCH=ON \
-DDOWNLOAD_EIGEN=OFF \
-DCMAKE_CXX_STANDARD=11 \
-DCMAKE_CXX_COMPILER=$(which icpc) \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- name: Build C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 -j 2 -v
- name: Python tests C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
sudo service apport stop
cmake --build build-11 --target check
- name: C++ tests C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 --target cpptest
- name: Interface test C++11
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-11 --target test_cmake_build
- name: Configure C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake -S . -B build-17 \
-DPYBIND11_WERROR=ON \
-DDOWNLOAD_CATCH=ON \
-DDOWNLOAD_EIGEN=OFF \
-DCMAKE_CXX_STANDARD=17 \
-DCMAKE_CXX_COMPILER=$(which icpc) \
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
- name: Build C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 -j 2 -v
- name: Python tests C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
sudo service apport stop
cmake --build build-17 --target check
- name: C++ tests C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 --target cpptest
- name: Interface test C++17
run: |
set +e; source /opt/intel/oneapi/setvars.sh; set -e
cmake --build build-17 --target test_cmake_build
# Testing on CentOS (manylinux uses a centos base, and this is an easy way
# to get GCC 4.8, which is the manylinux1 compiler).
centos:
@ -397,11 +600,11 @@ jobs:
fail-fast: false
matrix:
centos:
- 7 # GCC 4.8
- 8
- centos7 # GCC 4.8
- stream8
name: "🐍 3 • CentOS ${{ matrix.centos }} • x64"
container: "centos:${{ matrix.centos }}"
container: "quay.io/centos/centos:${{ matrix.centos }}"
steps:
- uses: actions/checkout@v2
@ -413,12 +616,14 @@ jobs:
run: python3 -m pip install --upgrade pip
- name: Install dependencies
run: python3 -m pip install cmake -r tests/requirements.txt --prefer-binary
run: |
python3 -m pip install cmake -r tests/requirements.txt
- name: Configure
shell: bash
run: >
cmake -S . -B build
-DCMAKE_BUILD_TYPE=MinSizeRel
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
@ -476,7 +681,7 @@ jobs:
-DPYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)")
working-directory: /build-tests
- name: Run tests
- name: Python tests
run: make pytest -j 2
working-directory: /build-tests
@ -493,16 +698,13 @@ jobs:
- uses: actions/setup-python@v2
- name: Install Doxygen
run: sudo apt install -y doxygen
- name: Install docs & setup requirements
run: python3 -m pip install -r docs/requirements.txt
run: sudo apt-get install -y doxygen librsvg2-bin # Changed to rsvg-convert in 20.04
- name: Build docs
run: python3 -m sphinx -W -b html docs docs/.build
run: pipx run nox -s docs
- name: Make SDist
run: python3 setup.py sdist
run: pipx run nox -s build -- --sdist
- run: git status --ignored
@ -514,6 +716,250 @@ jobs:
- name: Compare Dists (headers only)
working-directory: include
run: |
python3 -m pip install --user -U ../dist/*
python3 -m pip install --user -U ../dist/*.tar.gz
installed=$(python3 -c "import pybind11; print(pybind11.get_include() + '/pybind11')")
diff -rq $installed ./pybind11
win32:
strategy:
fail-fast: false
matrix:
python:
- 3.5
- 3.6
- 3.7
- 3.8
- 3.9
- pypy-3.6
include:
- python: 3.9
args: -DCMAKE_CXX_STANDARD=20 -DDOWNLOAD_EIGEN=OFF
- python: 3.8
args: -DCMAKE_CXX_STANDARD=17
name: "🐍 ${{ matrix.python }} • MSVC 2019 • x86 ${{ matrix.args }}"
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Setup Python ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
architecture: x86
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Prepare MSVC
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
arch: x86
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
# First build - C++11 mode and inplace
- name: Configure ${{ matrix.args }}
run: >
cmake -S . -B build
-G "Visual Studio 16 2019" -A Win32
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
${{ matrix.args }}
- name: Build C++11
run: cmake --build build -j 2
- name: Python tests
run: cmake --build build -t pytest
win32-msvc2015:
name: "🐍 ${{ matrix.python }} • MSVC 2015 • x64"
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
python:
- 2.7
- 3.6
- 3.7
# todo: check/cpptest does not support 3.8+ yet
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Prepare MSVC
uses: ilammy/msvc-dev-cmd@v1.10.0
with:
toolset: 14.0
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
# First build - C++11 mode and inplace
- name: Configure
run: >
cmake -S . -B build
-G "Visual Studio 14 2015" -A x64
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
- name: Build C++14
run: cmake --build build -j 2
- name: Run all checks
run: cmake --build build -t check
win32-msvc2017:
name: "🐍 ${{ matrix.python }} • MSVC 2017 • x64"
runs-on: windows-2016
strategy:
fail-fast: false
matrix:
python:
- 2.7
- 3.5
- 3.7
std:
- 14
include:
- python: 2.7
std: 17
args: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
- python: 3.7
std: 17
args: >
-DCMAKE_CXX_FLAGS="/permissive- /EHsc /GR"
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 ${{ matrix.python }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python }}
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
# First build - C++11 mode and inplace
- name: Configure
run: >
cmake -S . -B build
-G "Visual Studio 15 2017" -A x64
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=${{ matrix.std }}
${{ matrix.args }}
- name: Build ${{ matrix.std }}
run: cmake --build build -j 2
- name: Run all checks
run: cmake --build build -t check
mingw:
name: "🐍 3 • windows-latest • ${{ matrix.sys }}"
runs-on: windows-latest
defaults:
run:
shell: msys2 {0}
strategy:
fail-fast: false
matrix:
include:
- { sys: mingw64, env: x86_64 }
- { sys: mingw32, env: i686 }
steps:
- uses: msys2/setup-msys2@v2
with:
msystem: ${{matrix.sys}}
install: >-
git
mingw-w64-${{matrix.env}}-gcc
mingw-w64-${{matrix.env}}-python-pip
mingw-w64-${{matrix.env}}-python-numpy
mingw-w64-${{matrix.env}}-python-scipy
mingw-w64-${{matrix.env}}-cmake
mingw-w64-${{matrix.env}}-make
mingw-w64-${{matrix.env}}-python-pytest
mingw-w64-${{matrix.env}}-eigen3
mingw-w64-${{matrix.env}}-boost
mingw-w64-${{matrix.env}}-catch
- uses: actions/checkout@v2
- name: Configure C++11
# LTO leads to many undefined reference like
# `pybind11::detail::function_call::function_call(pybind11::detail::function_call&&)
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=11 -S . -B build
- name: Build C++11
run: cmake --build build -j 2
- name: Python tests C++11
run: cmake --build build --target pytest -j 2
- name: C++11 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build --target cpptest -j 2
- name: Interface test C++11
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build --target test_cmake_build
- name: Clean directory
run: git clean -fdx
- name: Configure C++14
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=14 -S . -B build2
- name: Build C++14
run: cmake --build build2 -j 2
- name: Python tests C++14
run: cmake --build build2 --target pytest -j 2
- name: C++14 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build2 --target cpptest -j 2
- name: Interface test C++14
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build2 --target test_cmake_build
- name: Clean directory
run: git clean -fdx
- name: Configure C++17
run: cmake -G "MinGW Makefiles" -DCMAKE_CXX_STANDARD=17 -S . -B build3
- name: Build C++17
run: cmake --build build3 -j 2
- name: Python tests C++17
run: cmake --build build3 --target pytest -j 2
- name: C++17 tests
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build3 --target cpptest -j 2
- name: Interface test C++17
run: PYTHONHOME=/${{matrix.sys}} PYTHONPATH=/${{matrix.sys}} cmake --build build3 --target test_cmake_build

View File

@ -18,7 +18,7 @@ jobs:
matrix:
runs-on: [ubuntu-latest, macos-latest, windows-latest]
arch: [x64]
cmake: [3.18]
cmake: ["3.21"]
include:
- runs-on: ubuntu-latest
@ -55,7 +55,7 @@ jobs:
# An action for adding a specific version of CMake:
# https://github.com/jwlawson/actions-setup-cmake
- name: Setup CMake ${{ matrix.cmake }}
uses: jwlawson/actions-setup-cmake@v1.3
uses: jwlawson/actions-setup-cmake@v1.12
with:
cmake-version: ${{ matrix.cmake }}
@ -82,57 +82,3 @@ jobs:
working-directory: build dir
if: github.event_name == 'workflow_dispatch'
run: cmake --build . --config Release --target check
# This builds the sdists and wheels and makes sure the files are exactly as
# expected. Using Windows and Python 2.7, since that is often the most
# challenging matrix element.
test-packaging:
name: 🐍 2.7 • 📦 tests • windows-latest
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 2.7
uses: actions/setup-python@v2
with:
python-version: 2.7
- name: Prepare env
run: python -m pip install -r tests/requirements.txt --prefer-binary
- name: Python Packaging tests
run: pytest tests/extra_python_package/
# This runs the packaging tests and also builds and saves the packages as
# artifacts.
packaging:
name: 🐍 3.8 • 📦 & 📦 tests • ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Prepare env
run: python -m pip install -r tests/requirements.txt build twine --prefer-binary
- name: Python Packaging tests
run: pytest tests/extra_python_package/
- name: Build SDist and wheels
run: |
python -m build -s -w .
PYBIND11_GLOBAL_SDIST=1 python -m build -s -w .
- name: Check metadata
run: twine check dist/*
- uses: actions/upload-artifact@v2
with:
path: dist/*

View File

@ -19,15 +19,17 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- uses: pre-commit/action@v2.0.0
- uses: pre-commit/action@v2.0.3
with:
# Slow hooks are marked with manual - slow is okay here, run them too
extra_args: --hook-stage manual
extra_args: --hook-stage manual --all-files
clang-tidy:
# When making changes here, please also review the "Clang-Tidy" section
# in .github/CONTRIBUTING.md and update as needed.
name: Clang-Tidy
runs-on: ubuntu-latest
container: silkeh/clang:10
container: silkeh/clang:12
steps:
- uses: actions/checkout@v2
@ -35,7 +37,12 @@ jobs:
run: apt-get update && apt-get install -y python3-dev python3-pytest
- name: Configure
run: cmake -S . -B build -DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy);--warnings-as-errors=*"
run: >
cmake -S . -B build
-DCMAKE_CXX_CLANG_TIDY="$(which clang-tidy)"
-DDOWNLOAD_EIGEN=ON
-DDOWNLOAD_CATCH=ON
-DCMAKE_CXX_STANDARD=17
- name: Build
run: cmake --build build -j 2
run: cmake --build build -j 2 -- --keep-going

View File

@ -0,0 +1,16 @@
name: Labeler
on:
pull_request_target:
types: [closed]
jobs:
label:
name: Labeler
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@main
if: github.event.pull_request.merged == true
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
configuration-path: .github/labeler_merged.yml

108
wrap/pybind11/.github/workflows/pip.yml vendored Normal file
View File

@ -0,0 +1,108 @@
name: Pip
on:
workflow_dispatch:
pull_request:
push:
branches:
- master
- stable
- v*
release:
types:
- published
env:
PIP_ONLY_BINARY: numpy
jobs:
# This builds the sdists and wheels and makes sure the files are exactly as
# expected. Using Windows and Python 2.7, since that is often the most
# challenging matrix element.
test-packaging:
name: 🐍 2.7 • 📦 tests • windows-latest
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 2.7
uses: actions/setup-python@v2
with:
python-version: 2.7
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
- name: Python Packaging tests
run: pytest tests/extra_python_package/
# This runs the packaging tests and also builds and saves the packages as
# artifacts.
packaging:
name: 🐍 3.8 • 📦 & 📦 tests • ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup 🐍 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt build twine
- name: Python Packaging tests
run: pytest tests/extra_python_package/
- name: Build SDist and wheels
run: |
python -m build
PYBIND11_GLOBAL_SDIST=1 python -m build
- name: Check metadata
run: twine check dist/*
- name: Save standard package
uses: actions/upload-artifact@v2
with:
name: standard
path: dist/pybind11-*
- name: Save global package
uses: actions/upload-artifact@v2
with:
name: global
path: dist/pybind11_global-*
# When a GitHub release is made, upload the artifacts to PyPI
upload:
name: Upload to PyPI
runs-on: ubuntu-latest
if: github.event_name == 'release' && github.event.action == 'published'
needs: [packaging]
steps:
- uses: actions/setup-python@v2
# Downloads all to directories matching the artifact names
- uses: actions/download-artifact@v2
- name: Publish standard package
uses: pypa/gh-action-pypi-publish@v1.5.0
with:
password: ${{ secrets.pypi_password }}
packages_dir: standard/
- name: Publish global package
uses: pypa/gh-action-pypi-publish@v1.5.0
with:
password: ${{ secrets.pypi_password_global }}
packages_dir: global/

View File

@ -0,0 +1,112 @@
name: Upstream
on:
workflow_dispatch:
pull_request:
concurrency:
group: upstream-${{ github.ref }}
cancel-in-progress: true
env:
PIP_ONLY_BINARY: numpy
jobs:
standard:
name: "🐍 3.11 dev • ubuntu-latest • x64"
runs-on: ubuntu-latest
if: "contains(github.event.pull_request.labels.*.name, 'python dev')"
steps:
- uses: actions/checkout@v2
- name: Setup Python 3.11
uses: actions/setup-python@v2
with:
python-version: "3.11-dev"
- name: Setup Boost (Linux)
if: runner.os == 'Linux'
run: sudo apt-get install libboost-dev
- name: Update CMake
uses: jwlawson/actions-setup-cmake@v1.12
- name: Prepare env
run: |
python -m pip install -r tests/requirements.txt
- name: Setup annotations on Linux
if: runner.os == 'Linux'
run: python -m pip install pytest-github-actions-annotate-failures
# First build - C++11 mode and inplace
- name: Configure C++11
run: >
cmake -S . -B .
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=11
- name: Build C++11
run: cmake --build . -j 2
- name: Python tests C++11
run: cmake --build . --target pytest -j 2
- name: C++11 tests
run: cmake --build . --target cpptest -j 2
- name: Interface test C++11
run: cmake --build . --target test_cmake_build
- name: Clean directory
run: git clean -fdx
# Second build - C++17 mode and in a build directory
- name: Configure C++17
run: >
cmake -S . -B build2
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
${{ matrix.args }}
${{ matrix.args2 }}
- name: Build
run: cmake --build build2 -j 2
- name: Python tests
run: cmake --build build2 --target pytest
- name: C++ tests
run: cmake --build build2 --target cpptest
# Third build - C++17 mode with unstable ABI
- name: Configure (unstable ABI)
run: >
cmake -S . -B build3
-DPYBIND11_WERROR=ON
-DDOWNLOAD_CATCH=ON
-DDOWNLOAD_EIGEN=ON
-DCMAKE_CXX_STANDARD=17
-DPYBIND11_INTERNALS_VERSION=10000000
"-DPYBIND11_TEST_OVERRIDE=test_call_policies.cpp;test_gil_scoped.cpp;test_thread.cpp"
${{ matrix.args }}
- name: Build (unstable ABI)
run: cmake --build build3 -j 2
- name: Python tests (unstable ABI)
run: cmake --build build3 --target pytest
- name: Interface test
run: cmake --build build2 --target test_cmake_build
# This makes sure the setup_helpers module can build packages using
# setuptools
- name: Setuptools helpers test
run: pytest tests/extra_setuptools

View File

@ -41,3 +41,5 @@ pybind11Targets.cmake
/.vscode
/pybind11/include/*
/pybind11/share/*
/docs/_build/*
.ipynb_checkpoints/

View File

@ -15,12 +15,14 @@
repos:
# Standard hooks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
rev: v4.1.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-docstring-first
- id: check-merge-conflict
- id: check-symlinks
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: end-of-file-fixer
@ -28,54 +30,115 @@ repos:
- id: requirements-txt-fixer
- id: trailing-whitespace
- id: fix-encoding-pragma
exclude: ^noxfile.py$
- repo: https://github.com/asottile/pyupgrade
rev: v2.31.0
hooks:
- id: pyupgrade
- repo: https://github.com/PyCQA/isort
rev: 5.10.1
hooks:
- id: isort
# Black, the code formatter, natively supports pre-commit
- repo: https://github.com/psf/black
rev: 20.8b1
rev: 21.12b0 # Keep in sync with blacken-docs
hooks:
- id: black
# Not all Python files are Blacked, yet
files: ^(setup.py|pybind11|tests/extra)
- repo: https://github.com/asottile/blacken-docs
rev: v1.12.0
hooks:
- id: blacken-docs
additional_dependencies:
- black==21.12b0 # keep in sync with black hook
# Changes tabs to spaces
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.1.9
rev: v1.1.10
hooks:
- id: remove-tabs
# Autoremoves unused imports
- repo: https://github.com/hadialqattan/pycln
rev: v1.1.0
hooks:
- id: pycln
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.9.0
hooks:
- id: python-check-blanket-noqa
- id: python-check-blanket-type-ignore
- id: python-no-log-warn
- id: rst-backticks
- id: rst-directive-colons
- id: rst-inline-touching-normal
# Flake8 also supports pre-commit natively (same author)
- repo: https://gitlab.com/pycqa/flake8
rev: 3.8.3
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
hooks:
- id: flake8
additional_dependencies: [flake8-bugbear, pep8-naming]
additional_dependencies: &flake8_dependencies
- flake8-bugbear
- pep8-naming
exclude: ^(docs/.*|tools/.*)$
- repo: https://github.com/asottile/yesqa
rev: v1.3.0
hooks:
- id: yesqa
additional_dependencies: *flake8_dependencies
# CMake formatting
- repo: https://github.com/cheshirekow/cmake-format-precommit
rev: v0.6.11
rev: v0.6.13
hooks:
- id: cmake-format
additional_dependencies: [pyyaml]
types: [file]
files: (\.cmake|CMakeLists.txt)(.in)?$
# Check static types with mypy
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.931
hooks:
- id: mypy
# Running per-file misbehaves a bit, so just run on all files, it's fast
pass_filenames: false
additional_dependencies: [typed_ast]
# Checks the manifest for missing files (native support)
- repo: https://github.com/mgedmin/check-manifest
rev: "0.42"
rev: "0.47"
hooks:
- id: check-manifest
# This is a slow hook, so only run this if --hook-stage manual is passed
stages: [manual]
additional_dependencies: [cmake, ninja]
- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
hooks:
- id: codespell
exclude: ".supp$"
args: ["-L", "nd,ot,thist"]
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.8.0.3
hooks:
- id: shellcheck
# The original pybind11 checks for a few C++ style items
- repo: local
hooks:
- id: disallow-caps
name: Disallow improper capitalization
language: pygrep
entry: PyBind|Numpy|Cmake
entry: PyBind|Numpy|Cmake|CCache|PyTest
exclude: .pre-commit-config.yaml
- repo: local

View File

@ -7,13 +7,18 @@
cmake_minimum_required(VERSION 3.4)
# The `cmake_minimum_required(VERSION 3.4...3.18)` syntax does not work with
# The `cmake_minimum_required(VERSION 3.4...3.22)` syntax does not work with
# some versions of VS that have a patched CMake 3.11. This forces us to emulate
# the behavior using the following workaround:
if(${CMAKE_VERSION} VERSION_LESS 3.18)
if(${CMAKE_VERSION} VERSION_LESS 3.22)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
else()
cmake_policy(VERSION 3.18)
cmake_policy(VERSION 3.22)
endif()
# Avoid infinite recursion if tests include this as a subdirectory
if(DEFINED PYBIND11_MASTER_PROJECT)
return()
endif()
# Extract project version from source
@ -73,6 +78,10 @@ if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif()
set(pybind11_system "")
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
else()
set(PYBIND11_MASTER_PROJECT OFF)
set(pybind11_system SYSTEM)
@ -82,6 +91,9 @@ endif()
option(PYBIND11_INSTALL "Install pybind11 header files?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_TEST "Build pybind11 test suite?" ${PYBIND11_MASTER_PROJECT})
option(PYBIND11_NOPYTHON "Disable search for Python" OFF)
set(PYBIND11_INTERNALS_VERSION
""
CACHE STRING "Override the ABI version, may be used to enable the unstable ABI.")
cmake_dependent_option(
USE_PYTHON_INCLUDE_DIR
@ -98,6 +110,7 @@ set(PYBIND11_HEADERS
include/pybind11/detail/descr.h
include/pybind11/detail/init.h
include/pybind11/detail/internals.h
include/pybind11/detail/type_caster_base.h
include/pybind11/detail/typeid.h
include/pybind11/attr.h
include/pybind11/buffer_info.h
@ -109,6 +122,7 @@ set(PYBIND11_HEADERS
include/pybind11/eigen.h
include/pybind11/embed.h
include/pybind11/eval.h
include/pybind11/gil.h
include/pybind11/iostream.h
include/pybind11/functional.h
include/pybind11/numpy.h
@ -116,7 +130,8 @@ set(PYBIND11_HEADERS
include/pybind11/pybind11.h
include/pybind11/pytypes.h
include/pybind11/stl.h
include/pybind11/stl_bind.h)
include/pybind11/stl_bind.h
include/pybind11/stl/filesystem.h)
# Compare with grep and warn if mismatched
if(PYBIND11_MASTER_PROJECT AND NOT CMAKE_VERSION VERSION_LESS 3.12)
@ -142,22 +157,45 @@ endif()
string(REPLACE "include/" "${CMAKE_CURRENT_SOURCE_DIR}/include/" PYBIND11_HEADERS
"${PYBIND11_HEADERS}")
# Cache variables so pybind11_add_module can be used in parent projects
set(PYBIND11_INCLUDE_DIR
# Cache variable so this can be used in parent projects
set(pybind11_INCLUDE_DIR
"${CMAKE_CURRENT_LIST_DIR}/include"
CACHE INTERNAL "")
CACHE INTERNAL "Directory where pybind11 headers are located")
# Backward compatible variable for add_subdirectory mode
if(NOT PYBIND11_MASTER_PROJECT)
set(PYBIND11_INCLUDE_DIR
"${pybind11_INCLUDE_DIR}"
CACHE INTERNAL "")
endif()
# Note: when creating targets, you cannot use if statements at configure time -
# you need generator expressions, because those will be placed in the target file.
# You can also place ifs *in* the Config.in, but not here.
# This section builds targets, but does *not* touch Python
# Non-IMPORT targets cannot be defined twice
if(NOT TARGET pybind11_headers)
# Build the headers-only target (no Python included):
# (long name used here to keep this from clashing in subdirectory mode)
add_library(pybind11_headers INTERFACE)
add_library(pybind11::pybind11_headers ALIAS pybind11_headers) # to match exported target
add_library(pybind11::headers ALIAS pybind11_headers) # easier to use/remember
# Build the headers-only target (no Python included):
# (long name used here to keep this from clashing in subdirectory mode)
add_library(pybind11_headers INTERFACE)
add_library(pybind11::pybind11_headers ALIAS pybind11_headers) # to match exported target
add_library(pybind11::headers ALIAS pybind11_headers) # easier to use/remember
target_include_directories(
pybind11_headers ${pybind11_system} INTERFACE $<BUILD_INTERFACE:${pybind11_INCLUDE_DIR}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
target_compile_features(pybind11_headers INTERFACE cxx_inheriting_constructors cxx_user_literals
cxx_right_angle_brackets)
if(NOT "${PYBIND11_INTERNALS_VERSION}" STREQUAL "")
target_compile_definitions(
pybind11_headers INTERFACE "PYBIND11_INTERNALS_VERSION=${PYBIND11_INTERNALS_VERSION}")
endif()
else()
# It is invalid to install a target twice, too.
set(PYBIND11_INSTALL OFF)
endif()
include("${CMAKE_CURRENT_SOURCE_DIR}/tools/pybind11Common.cmake")
@ -168,21 +206,18 @@ elseif(USE_PYTHON_INCLUDE_DIR AND DEFINED PYTHON_INCLUDE_DIR)
file(RELATIVE_PATH CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX} ${PYTHON_INCLUDE_DIRS})
endif()
# Fill in headers target
target_include_directories(
pybind11_headers ${pybind11_system} INTERFACE $<BUILD_INTERFACE:${PYBIND11_INCLUDE_DIR}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
target_compile_features(pybind11_headers INTERFACE cxx_inheriting_constructors cxx_user_literals
cxx_right_angle_brackets)
if(PYBIND11_INSTALL)
install(DIRECTORY ${PYBIND11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
# GNUInstallDirs "DATADIR" wrong here; CMake search path wants "share".
install(DIRECTORY ${pybind11_INCLUDE_DIR}/pybind11 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
set(PYBIND11_CMAKECONFIG_INSTALL_DIR
"share/cmake/${PROJECT_NAME}"
"${CMAKE_INSTALL_DATAROOTDIR}/cmake/${PROJECT_NAME}"
CACHE STRING "install path for pybind11Config.cmake")
if(IS_ABSOLUTE "${CMAKE_INSTALL_INCLUDEDIR}")
set(pybind11_INCLUDEDIR "${CMAKE_INSTALL_FULL_INCLUDEDIR}")
else()
set(pybind11_INCLUDEDIR "\$\{PACKAGE_PREFIX_DIR\}/${CMAKE_INSTALL_INCLUDEDIR}")
endif()
configure_package_config_file(
tools/${PROJECT_NAME}Config.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
INSTALL_DESTINATION ${PYBIND11_CMAKECONFIG_INSTALL_DIR})
@ -260,8 +295,5 @@ endif()
if(NOT PYBIND11_MASTER_PROJECT)
set(pybind11_FOUND
TRUE
CACHE INTERNAL "true if pybind11 and all required components found on the system")
set(pybind11_INCLUDE_DIR
"${PYBIND11_INCLUDE_DIR}"
CACHE INTERNAL "Directory where pybind11 headers are located")
CACHE INTERNAL "True if pybind11 and all required components found on the system")
endif()

View File

@ -1,4 +1,6 @@
recursive-include pybind11/include/pybind11 *.h
recursive-include pybind11 *.py
recursive-include pybind11 py.typed
recursive-include pybind11 *.pyi
include pybind11/share/cmake/pybind11/*.cmake
include LICENSE README.md pyproject.toml setup.py setup.cfg
include LICENSE README.rst pyproject.toml setup.py setup.cfg

View File

@ -1,145 +0,0 @@
![pybind11 logo](https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png)
# pybind11 — Seamless operability between C++11 and Python
[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=master)](http://pybind11.readthedocs.org/en/master/?badge=master)
[![Documentation Status](https://readthedocs.org/projects/pybind11/badge/?version=stable)](http://pybind11.readthedocs.org/en/stable/?badge=stable)
[![Gitter chat](https://img.shields.io/gitter/room/gitterHQ/gitter.svg)](https://gitter.im/pybind/Lobby)
[![CI](https://github.com/pybind/pybind11/workflows/CI/badge.svg)](https://github.com/pybind/pybind11/actions)
[![Build status](https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true)](https://ci.appveyor.com/project/wjakob/pybind11)
**pybind11** is a lightweight header-only library that exposes C++ types in
Python and vice versa, mainly to create Python bindings of existing C++ code.
Its goals and syntax are similar to the excellent [Boost.Python][] library by
David Abrahams: to minimize boilerplate code in traditional extension modules
by inferring type information using compile-time introspection.
The main issue with Boost.Python—and the reason for creating such a similar
project—is Boost. Boost is an enormously large and complex suite of utility
libraries that works with almost every C++ compiler in existence. This
compatibility has its cost: arcane template tricks and workarounds are
necessary to support the oldest and buggiest of compiler specimens. Now that
C++11-compatible compilers are widely available, this heavy machinery has
become an excessively large and unnecessary dependency.
Think of this library as a tiny self-contained version of Boost.Python with
everything stripped away that isn't relevant for binding generation. Without
comments, the core header files only require ~4K lines of code and depend on
Python (2.7 or 3.5+, or PyPy) and the C++ standard library. This compact
implementation was possible thanks to some of the new C++11 language features
(specifically: tuples, lambda functions and variadic templates). Since its
creation, this library has grown beyond Boost.Python in many ways, leading to
dramatically simpler binding code in many common situations.
Tutorial and reference documentation is provided at
[pybind11.readthedocs.org][]. A PDF version of the manual is available
[here][docs-pdf].
## Core features
pybind11 can map the following core C++ features to Python:
- Functions accepting and returning custom data structures per value, reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like `std::shared_ptr`
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended in Python
## Goodies
In addition to the core functionality, pybind11 provides some extra goodies:
- Python 2.7, 3.5+, and PyPy (tested on 7.3) are supported with an implementation-agnostic
interface.
- It is possible to bind C++11 lambda functions with captured variables. The
lambda capture data is stored inside the resulting Python function object.
- pybind11 uses C++11 move constructors and move assignment operators whenever
possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
Pythons' buffer protocols. This is handy e.g. for fast conversion between
C++ matrix classes like Eigen and NumPy without expensive copy operations.
- pybind11 can automatically vectorize functions so that they are transparently
applied to all entries of one or more NumPy array arguments.
- Python's slice-based access and assignment operations can be supported with
just a few lines of code.
- Everything is contained in just a few header files; there is no need to link
against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11 conversion
of PyRosetta, an enormous Boost.Python binding project,
[reported][pyrosetta-report] a binary size reduction of **5.4x** and compile
time reduction by **5.8x**.
- Function signatures are precomputed at compile time (using `constexpr`),
leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled similar to
regular Python objects.
## Supported compilers
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11
v2.0 and a [workaround][intel-15-workaround])
5. Cygwin/GCC (tested on 2.5.1)
6. NVCC (CUDA 11 tested)
7. NVIDIA PGI (20.7 tested)
## About
This project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob).
Significant features and/or improvements to the code were contributed by
Jonas Adler,
Lori A. Burns,
Sylvain Corlay,
Trent Houliston,
Axel Huebl,
@hulucc,
Sergey Lyskov
Johan Mabille,
Tomasz Miąsko,
Dean Moldovan,
Ben Pritchard,
Jason Rhinelander,
Boris Schäling,
Pim Schellart,
Henry Schreiner,
Ivan Smirnov, and
Patrick Stewart.
### Contributing
See the [contributing guide][] for information on building and contributing to
pybind11.
### License
pybind11 is provided under a BSD-style license that can be found in the
[`LICENSE`][] file. By using, distributing, or contributing to this project,
you agree to the terms and conditions of this license.
[pybind11.readthedocs.org]: http://pybind11.readthedocs.org/en/master
[docs-pdf]: https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf
[Boost.Python]: http://www.boost.org/doc/libs/1_58_0/libs/python/doc/
[pyrosetta-report]: http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf
[contributing guide]: https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md
[`LICENSE`]: https://github.com/pybind/pybind11/blob/master/LICENSE
[intel-15-workaround]: https://github.com/pybind/pybind11/issues/276

180
wrap/pybind11/README.rst Normal file
View File

@ -0,0 +1,180 @@
.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
:alt: pybind11 logo
**pybind11 — Seamless operability between C++11 and Python**
|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status|
|Repology| |PyPI package| |Conda-forge| |Python Versions|
`Setuptools example <https://github.com/pybind/python_example>`_
`Scikit-build example <https://github.com/pybind/scikit_build_example>`_
`CMake example <https://github.com/pybind/cmake_example>`_
.. start
**pybind11** is a lightweight header-only library that exposes C++ types
in Python and vice versa, mainly to create Python bindings of existing
C++ code. Its goals and syntax are similar to the excellent
`Boost.Python <http://www.boost.org/doc/libs/1_58_0/libs/python/doc/>`_
library by David Abrahams: to minimize boilerplate code in traditional
extension modules by inferring type information using compile-time
introspection.
The main issue with Boost.Python—and the reason for creating such a
similar project—is Boost. Boost is an enormously large and complex suite
of utility libraries that works with almost every C++ compiler in
existence. This compatibility has its cost: arcane template tricks and
workarounds are necessary to support the oldest and buggiest of compiler
specimens. Now that C++11-compatible compilers are widely available,
this heavy machinery has become an excessively large and unnecessary
dependency.
Think of this library as a tiny self-contained version of Boost.Python
with everything stripped away that isn't relevant for binding
generation. Without comments, the core header files only require ~4K
lines of code and depend on Python (2.7 or 3.5+, or PyPy) and the C++
standard library. This compact implementation was possible thanks to
some of the new C++11 language features (specifically: tuples, lambda
functions and variadic templates). Since its creation, this library has
grown beyond Boost.Python in many ways, leading to dramatically simpler
binding code in many common situations.
Tutorial and reference documentation is provided at
`pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_.
A PDF version of the manual is available
`here <https://pybind11.readthedocs.io/_/downloads/en/latest/pdf/>`_.
And the source code is always available at
`github.com/pybind/pybind11 <https://github.com/pybind/pybind11>`_.
Core features
-------------
pybind11 can map the following core C++ features to Python:
- Functions accepting and returning custom data structures per value,
reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended
in Python
Goodies
-------
In addition to the core functionality, pybind11 provides some extra
goodies:
- Python 2.7, 3.5+, and PyPy/PyPy3 7.3 are supported with an
implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured
variables. The lambda capture data is stored inside the resulting
Python function object.
- pybind11 uses C++11 move constructors and move assignment operators
whenever possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
Python's buffer protocols. This is handy e.g. for fast conversion
between C++ matrix classes like Eigen and NumPy without expensive
copy operations.
- pybind11 can automatically vectorize functions so that they are
transparently applied to all entries of one or more NumPy array
arguments.
- Python's slice-based access and assignment operations can be
supported with just a few lines of code.
- Everything is contained in just a few header files; there is no need
to link against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11
conversion of PyRosetta, an enormous Boost.Python binding project,
`reported <https://graylab.jhu.edu/Sergey/2016.RosettaCon/PyRosetta-4.pdf>`_
a binary size reduction of **5.4x** and compile time reduction by
**5.8x**.
- Function signatures are precomputed at compile time (using
``constexpr``), leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled
similar to regular Python objects.
Supported compilers
-------------------
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or
newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
5. Cygwin/GCC (previously tested on 2.5.1)
6. NVCC (CUDA 11.0 tested in CI)
7. NVIDIA PGI (20.9 tested in CI)
About
-----
This project was created by `Wenzel
Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or
improvements to the code were contributed by Jonas Adler, Lori A. Burns,
Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel
Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko,
Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
We thank Google for a generous financial contribution to the continuous
integration infrastructure used by this project.
Contributing
~~~~~~~~~~~~
See the `contributing
guide <https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md>`_
for information on building and contributing to pybind11.
License
~~~~~~~
pybind11 is provided under a BSD-style license that can be found in the
`LICENSE <https://github.com/pybind/pybind11/blob/master/LICENSE>`_
file. By using, distributing, or contributing to this project, you agree
to the terms and conditions of this license.
.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest
:target: http://pybind11.readthedocs.org/en/latest
.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg
:target: http://pybind11.readthedocs.org/en/stable
.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg
:target: https://gitter.im/pybind/Lobby
.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg
:target: https://github.com/pybind/pybind11/actions
.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true
:target: https://ci.appveyor.com/project/wjakob/pybind11
.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg
:target: https://github.com/conda-forge/pybind11-feedstock
.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg
:target: https://repology.org/project/python:pybind11/versions
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
:target: https://github.com/pybind/pybind11/discussions

View File

@ -18,5 +18,5 @@ ALIASES += "endrst=\endverbatim"
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = NO
PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS \
PY_MAJOR_VERSION=3
PREDEFINED = PY_MAJOR_VERSION=3 \
PYBIND11_NOINLINE

View File

@ -26,7 +26,9 @@ The following Python snippet demonstrates the intended usage from the Python sid
def __int__(self):
return 123
from example import print
print(A())
To register the necessary conversion routines, it is necessary to add an
@ -44,7 +46,7 @@ type is explicitly allowed.
* function signatures and declares a local variable
* 'value' of type inty
*/
PYBIND11_TYPE_CASTER(inty, _("inty"));
PYBIND11_TYPE_CASTER(inty, const_name("inty"));
/**
* Conversion part 1 (Python->C++): convert a PyObject into a inty

View File

@ -52,7 +52,7 @@ can be mapped *and* if the numpy array is writeable (that is
the passed variable will be transparently carried out directly on the
``numpy.ndarray``.
This means you can can write code such as the following and have it work as
This means you can write code such as the following and have it work as
expected:
.. code-block:: cpp
@ -112,7 +112,7 @@ example:
.. code-block:: python
a = MyClass()
m = a.get_matrix() # flags.writeable = True, flags.owndata = False
m = a.get_matrix() # flags.writeable = True, flags.owndata = False
v = a.view_matrix() # flags.writeable = False, flags.owndata = False
c = a.copy_matrix() # flags.writeable = True, flags.owndata = True
# m[5,6] and v[5,6] refer to the same element, c[5,6] does not.
@ -203,7 +203,7 @@ adding the ``order='F'`` option when creating an array:
.. code-block:: python
myarray = np.array(source, order='F')
myarray = np.array(source, order="F")
Such an object will be passable to a bound function accepting an
``Eigen::Ref<MatrixXd>`` (or similar column-major Eigen type).
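For illustration, a minimal sketch of a binding that accepts such a column-major reference without copying (the function name ``scale_in_place`` is hypothetical; assumes ``m`` is the module and :file:`pybind11/eigen.h` is included):
.. code-block:: cpp
// Modifies the caller's numpy array in place; a non-writeable or
// row-major array would fail conversion and raise a TypeError instead.
m.def("scale_in_place", [](Eigen::Ref<Eigen::MatrixXd> mat, double s) { mat *= s; });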

View File

@ -75,91 +75,97 @@ The following basic data types are supported out of the box (some may require
an additional extension header to be included). To pass other data structures
as arguments and return values, refer to the section on binding :ref:`classes`.
+------------------------------------+---------------------------+-------------------------------+
| Data type | Description | Header file |
+====================================+===========================+===============================+
| ``int8_t``, ``uint8_t`` | 8-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int16_t``, ``uint16_t`` | 16-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int32_t``, ``uint32_t`` | 32-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``int64_t``, ``uint64_t`` | 64-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``ssize_t``, ``size_t`` | Platform-dependent size | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``float``, ``double`` | Floating point types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``bool`` | Two-state Boolean type | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char`` | Character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char16_t`` | UTF-16 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``char32_t`` | UTF-32 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``wchar_t`` | Wide character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char *`` | UTF-8 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char16_t *`` | UTF-16 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const char32_t *`` | UTF-32 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``const wchar_t *`` | Wide string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::string`` | STL dynamic UTF-8 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::u16string`` | STL dynamic UTF-16 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::u32string`` | STL dynamic UTF-32 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::wstring`` | STL dynamic wide string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::string_view``, | STL C++17 string views | :file:`pybind11/pybind11.h` |
| ``std::u16string_view``, etc. | | |
+------------------------------------+---------------------------+-------------------------------+
| ``std::pair<T1, T2>`` | Pair of two custom types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::tuple<...>`` | Arbitrary tuple of types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::reference_wrapper<...>`` | Reference type wrapper | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::complex<T>`` | Complex numbers | :file:`pybind11/complex.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::array<T, Size>`` | STL static array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::vector<T>`` | STL dynamic array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::deque<T>`` | STL double-ended queue | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::valarray<T>`` | STL value array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::list<T>`` | STL linked list | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::map<T1, T2>`` | STL ordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::unordered_map<T1, T2>`` | STL unordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::set<T>`` | STL ordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::unordered_set<T>`` | STL unordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::optional<T>`` | STL optional type (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::experimental::optional<T>`` | STL optional type (exp.) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::variant<...>`` | Type-safe union (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::function<...>`` | STL polymorphic function | :file:`pybind11/functional.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::chrono::duration<...>`` | STL time duration | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``std::chrono::time_point<...>`` | STL date/time | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::Matrix<...>`` | Eigen: dense matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::Map<...>`` | Eigen: mapped memory | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
| ``Eigen::SparseMatrix<...>`` | Eigen: sparse matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-------------------------------+
+------------------------------------+---------------------------+-----------------------------------+
| Data type | Description | Header file |
+====================================+===========================+===================================+
| ``int8_t``, ``uint8_t`` | 8-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``int16_t``, ``uint16_t`` | 16-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``int32_t``, ``uint32_t`` | 32-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``int64_t``, ``uint64_t`` | 64-bit integers | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``ssize_t``, ``size_t`` | Platform-dependent size | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``float``, ``double`` | Floating point types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``bool`` | Two-state Boolean type | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``char`` | Character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``char16_t`` | UTF-16 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``char32_t`` | UTF-32 character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``wchar_t`` | Wide character literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``const char *`` | UTF-8 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``const char16_t *`` | UTF-16 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``const char32_t *`` | UTF-32 string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``const wchar_t *`` | Wide string literal | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::string`` | STL dynamic UTF-8 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::u16string`` | STL dynamic UTF-16 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::u32string`` | STL dynamic UTF-32 string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::wstring`` | STL dynamic wide string | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::string_view``, | STL C++17 string views | :file:`pybind11/pybind11.h` |
| ``std::u16string_view``, etc. | | |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::pair<T1, T2>`` | Pair of two custom types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::tuple<...>`` | Arbitrary tuple of types | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::reference_wrapper<...>`` | Reference type wrapper | :file:`pybind11/pybind11.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::complex<T>`` | Complex numbers | :file:`pybind11/complex.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::array<T, Size>`` | STL static array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::vector<T>`` | STL dynamic array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::deque<T>`` | STL double-ended queue | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::valarray<T>`` | STL value array | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::list<T>`` | STL linked list | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::map<T1, T2>`` | STL ordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::unordered_map<T1, T2>`` | STL unordered map | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::set<T>`` | STL ordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::unordered_set<T>`` | STL unordered set | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::optional<T>`` | STL optional type (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::experimental::optional<T>`` | STL optional type (exp.) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::variant<...>`` | Type-safe union (C++17) | :file:`pybind11/stl.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::filesystem::path<T>`` | STL path (C++17) [#]_ | :file:`pybind11/stl/filesystem.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::function<...>`` | STL polymorphic function | :file:`pybind11/functional.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::chrono::duration<...>`` | STL time duration | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``std::chrono::time_point<...>`` | STL date/time | :file:`pybind11/chrono.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``Eigen::Matrix<...>`` | Eigen: dense matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``Eigen::Map<...>`` | Eigen: mapped memory | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-----------------------------------+
| ``Eigen::SparseMatrix<...>`` | Eigen: sparse matrix | :file:`pybind11/eigen.h` |
+------------------------------------+---------------------------+-----------------------------------+
.. [#] ``std::filesystem::path`` is converted to ``pathlib.Path`` and
``os.PathLike`` is converted to ``std::filesystem::path``, but this requires
Python 3.6 (for ``__fspath__`` support).

View File

@ -5,7 +5,7 @@ Automatic conversion
====================
When including the additional header file :file:`pybind11/stl.h`, conversions
between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``,
between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``/``std::valarray<>``,
``std::set<>``/``std::unordered_set<>``, and
``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and
``dict`` data structures are automatically enabled. The types ``std::pair<>``
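As a minimal sketch of what this enables (the function name ``double_all`` is hypothetical; assumes ``m`` is the module and :file:`pybind11/stl.h` has been included):
.. code-block:: cpp
// A Python list of ints is copied into a std::vector on the way in,
// and the returned vector is copied back out as a new Python list.
m.def("double_all", [](std::vector<int> v) {
    for (auto &x : v) x *= 2;
    return v;
});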
@ -72,6 +72,17 @@ The ``visit_helper`` specialization is not required if your ``name::variant`` pr
a ``name::visit()`` function. For any other function name, the specialization must be
included to tell pybind11 how to visit the variant.
.. warning::
When converting a ``variant`` type, pybind11 follows the same rules as when
determining which function overload to call (:ref:`overload_resolution`), and
so the same caveats hold. In particular, the order in which the ``variant``'s
alternatives are listed is important, since pybind11 will try conversions in
this order. This means that, for example, when converting ``variant<int, bool>``,
the ``bool`` variant will never be selected, as any Python ``bool`` is already
an ``int`` and is convertible to a C++ ``int``. Changing the order of alternatives
(and using ``variant<bool, int>``, in this example) provides a solution.
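For illustration, a short sketch of the ordering issue described above (the function name is hypothetical; assumes C++17 and :file:`pybind11/stl.h`):
.. code-block:: cpp
// With std::variant<int, bool>, a Python bool would always be absorbed by the
// int alternative; listing bool first makes both alternatives reachable.
m.def("which_alternative", [](const std::variant<bool, int> &v) { return v.index(); });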
.. note::
pybind11 only supports the modern implementation of ``boost::variant``

View File

@ -36,13 +36,13 @@ everywhere <http://utf8everywhere.org/>`_.
}
);
.. code-block:: python
.. code-block:: pycon
>>> utf8_test('🎂')
>>> utf8_test("🎂")
utf-8 is icing on the cake.
🎂
>>> utf8_charptr('🍕')
>>> utf8_charptr("🍕")
My favorite food is
🍕
@ -80,7 +80,7 @@ raise a ``UnicodeDecodeError``.
}
);
.. code-block:: python
.. code-block:: pycon
>>> isinstance(example.std_string_return(), str)
True
@ -114,7 +114,7 @@ conversion has the same overhead as implicit conversion.
}
);
.. code-block:: python
.. code-block:: pycon
>>> str_output()
'Send your résumé to Alice in HR'
@ -143,7 +143,7 @@ returned to Python as ``bytes``, then one can return the data as a
}
);
.. code-block:: python
.. code-block:: pycon
>>> example.return_bytes()
b'\xba\xd0\xba\xd0'
@ -160,7 +160,7 @@ encoding, but cannot convert ``std::string`` back to ``bytes`` implicitly.
}
);
.. code-block:: python
.. code-block:: pycon
>>> isinstance(example.asymmetry(b"have some bytes"), str)
True
@ -229,16 +229,16 @@ character.
m.def("pass_char", [](char c) { return c; });
m.def("pass_wchar", [](wchar_t w) { return w; });
.. code-block:: python
.. code-block:: pycon
>>> example.pass_char('A')
>>> example.pass_char("A")
'A'
While C++ will cast integers to character types (``char c = 0x65;``), pybind11
does not convert Python integers to characters implicitly. The Python function
``chr()`` can be used to convert integers to characters.
.. code-block:: python
.. code-block:: pycon
>>> example.pass_char(0x65)
TypeError
@ -259,17 +259,17 @@ a combining acute accent). The combining character will be lost if the
two-character sequence is passed as an argument, even though it renders as a
single grapheme.
.. code-block:: python
.. code-block:: pycon
>>> example.pass_wchar('é')
>>> example.pass_wchar("é")
'é'
>>> combining_e_acute = 'e' + '\u0301'
>>> combining_e_acute = "e" + "\u0301"
>>> combining_e_acute
'é'
>>> combining_e_acute == 'é'
>>> combining_e_acute == "é"
False
>>> example.pass_wchar(combining_e_acute)
@ -278,9 +278,9 @@ single grapheme.
Normalizing combining characters before passing the character literal to C++
may resolve *some* of these issues:
.. code-block:: python
.. code-block:: pycon
>>> example.pass_wchar(unicodedata.normalize('NFC', combining_e_acute))
>>> example.pass_wchar(unicodedata.normalize("NFC", combining_e_acute))
'é'
In some languages (Thai for example), there are `graphemes that cannot be

View File

@ -9,7 +9,7 @@ that you are already familiar with the basics from :doc:`/classes`.
Overriding virtual functions in Python
======================================
Suppose that a C++ class or interface has a virtual function that we'd like to
Suppose that a C++ class or interface has a virtual function that we'd like
to override from within Python (we'll focus on the class ``Animal``; ``Dog`` is
given as a specific example of how one would do this with traditional C++
code).
@ -136,7 +136,7 @@ a virtual method call.
u'woof! woof! woof! '
>>> class Cat(Animal):
... def go(self, n_times):
... return "meow! " * n_times
... return "meow! " * n_times
...
>>> c = Cat()
>>> call_go(c)
@ -159,8 +159,9 @@ Here is an example:
class Dachshund(Dog):
def __init__(self, name):
Dog.__init__(self) # Without this, a TypeError is raised.
Dog.__init__(self) # Without this, a TypeError is raised.
self.name = name
def bark(self):
return "yap!"
@ -259,7 +260,7 @@ override the ``name()`` method):
.. note::
Note the trailing commas in the ``PYBIND11_OVERIDE`` calls to ``name()``
Note the trailing commas in the ``PYBIND11_OVERRIDE`` calls to ``name()``
and ``bark()``. These are needed to portably implement a trampoline for a
function that does not take any arguments. For functions that take
a nonzero number of arguments, the trailing comma must be omitted.
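For context, a sketch of such a trampoline override (assuming ``name()`` is a no-argument virtual of the ``Animal`` class used in this section), showing the trailing comma:
.. code-block:: cpp
std::string name() override {
    // Trailing comma is required because name() takes no arguments.
    PYBIND11_OVERRIDE(std::string, Animal, name, );
}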
@ -804,7 +805,7 @@ to bind these two functions:
}
));
The ``__setstate__`` part of the ``py::picke()`` definition follows the same
The ``__setstate__`` part of the ``py::pickle()`` definition follows the same
rules as the single-argument version of ``py::init()``. The return type can be
a value, pointer or holder type. See :ref:`custom_constructors` for details.
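For illustration, a minimal ``py::pickle()`` sketch (the ``Pickleable`` class and its members are hypothetical):
.. code-block:: cpp
py::class_<Pickleable>(m, "Pickleable")
    .def(py::init<std::string>())
    .def(py::pickle(
        [](const Pickleable &p) {  // __getstate__
            return py::make_tuple(p.value());
        },
        [](py::tuple t) {  // __setstate__: same rules as a one-argument py::init()
            return Pickleable(t[0].cast<std::string>());
        }));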
@ -1153,12 +1154,65 @@ error:
>>> class PyFinalChild(IsFinal):
... pass
...
TypeError: type 'IsFinal' is not an acceptable base type
.. note:: This attribute is currently ignored on PyPy
.. versionadded:: 2.6
Binding classes with template parameters
========================================
pybind11 can also wrap classes that have template parameters. Consider these classes:
.. code-block:: cpp
struct Cat {};
struct Dog {};
template <typename PetType>
struct Cage {
Cage(PetType& pet);
PetType& get();
};
C++ templates may only be instantiated at compile time, so pybind11 can only
wrap instantiated templated classes. You cannot wrap a non-instantiated template:
.. code-block:: cpp
// BROKEN (this will not compile)
py::class_<Cage>(m, "Cage")
.def("get", &Cage::get);
You must explicitly specify each template/type combination that you want to
wrap separately.
.. code-block:: cpp
// ok
py::class_<Cage<Cat>>(m, "CatCage")
.def("get", &Cage<Cat>::get);
// ok
py::class_<Cage<Dog>>(m, "DogCage")
.def("get", &Cage<Dog>::get);
If your class methods have template parameters you can wrap those as well,
but once again each instantiation must be explicitly specified:
.. code-block:: cpp
template <typename T>
struct MyClass {
template <typename V>
T fn(V v);
};
py::class_<MyClass<int>>(m, "MyClassT")
.def("fn", &MyClass<int>::fn<std::string>);
Custom automatic downcasters
============================
@ -1247,7 +1301,7 @@ Accessing the type object
You can get the type object from a C++ class that has already been registered using:
.. code-block:: python
.. code-block:: cpp
py::type T_py = py::type::of<T>();
@ -1259,3 +1313,37 @@ object, just like ``type(ob)`` in Python.
Other types, like ``py::type::of<int>()``, do not work, see :ref:`type-conversions`.
.. versionadded:: 2.6
Custom type setup
=================
For advanced use cases, such as enabling garbage collection support, you may
wish to directly manipulate the ``PyHeapTypeObject`` corresponding to a
``py::class_`` definition.
You can do that using ``py::custom_type_setup``:
.. code-block:: cpp
struct OwnsPythonObjects {
py::object value = py::none();
};
py::class_<OwnsPythonObjects> cls(
m, "OwnsPythonObjects", py::custom_type_setup([](PyHeapTypeObject *heap_type) {
auto *type = &heap_type->ht_type;
type->tp_flags |= Py_TPFLAGS_HAVE_GC;
type->tp_traverse = [](PyObject *self_base, visitproc visit, void *arg) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
Py_VISIT(self.value.ptr());
return 0;
};
type->tp_clear = [](PyObject *self_base) {
auto &self = py::cast<OwnsPythonObjects&>(py::handle(self_base));
self.value = py::none();
return 0;
};
}));
cls.def(py::init<>());
cls.def_readwrite("value", &OwnsPythonObjects::value);
.. versionadded:: 2.8

View File

@ -40,15 +40,15 @@ The essential structure of the ``main.cpp`` file looks like this:
}
The interpreter must be initialized before using any Python API, which includes
all the functions and classes in pybind11. The RAII guard class `scoped_interpreter`
all the functions and classes in pybind11. The RAII guard class ``scoped_interpreter``
takes care of the interpreter lifetime. After the guard is destroyed, the interpreter
shuts down and clears its memory. No Python functions can be called after this.
Executing Python code
=====================
There are a few different ways to run Python code. One option is to use `eval`,
`exec` or `eval_file`, as explained in :ref:`eval`. Here is a quick example in
There are a few different ways to run Python code. One option is to use ``eval``,
``exec`` or ``eval_file``, as explained in :ref:`eval`. Here is a quick example in
the context of an executable with an embedded interpreter:
.. code-block:: cpp
@ -108,11 +108,11 @@ The two approaches can also be combined:
Importing modules
=================
Python modules can be imported using `module::import()`:
Python modules can be imported using ``module_::import()``:
.. code-block:: cpp
py::module sys = py::module::import("sys");
py::module_ sys = py::module_::import("sys");
py::print(sys.attr("path"));
For convenience, the current working directory is included in ``sys.path`` when
@ -122,18 +122,19 @@ embedding the interpreter. This makes it easy to import local Python files:
"""calc.py located in the working directory"""
def add(i, j):
return i + j
.. code-block:: cpp
py::module calc = py::module::import("calc");
py::module_ calc = py::module_::import("calc");
py::object result = calc.attr("add")(1, 2);
int n = result.cast<int>();
assert(n == 3);
Modules can be reloaded using `module::reload()` if the source is modified e.g.
Modules can be reloaded using ``module_::reload()`` if the source is modified e.g.
by an external process. This can be useful in scenarios where the application
imports a user defined data processing script which needs to be updated after
changes by the user. Note that this function does not reload modules recursively.
@ -143,7 +144,7 @@ changes by the user. Note that this function does not reload modules recursively
Adding embedded modules
=======================
Embedded binary modules can be added using the `PYBIND11_EMBEDDED_MODULE` macro.
Embedded binary modules can be added using the ``PYBIND11_EMBEDDED_MODULE`` macro.
Note that the definition must be placed at global scope. They can be imported
like any other module.
@ -153,7 +154,7 @@ like any other module.
namespace py = pybind11;
PYBIND11_EMBEDDED_MODULE(fast_calc, m) {
// `m` is a `py::module` which is used to bind functions and classes
// `m` is a `py::module_` which is used to bind functions and classes
m.def("add", [](int i, int j) {
return i + j;
});
@ -162,14 +163,14 @@ like any other module.
int main() {
py::scoped_interpreter guard{};
auto fast_calc = py::module::import("fast_calc");
auto fast_calc = py::module_::import("fast_calc");
auto result = fast_calc.attr("add")(1, 2).cast<int>();
assert(result == 3);
}
Unlike extension modules where only a single binary module can be created, on
the embedded side an unlimited number of modules can be added using multiple
`PYBIND11_EMBEDDED_MODULE` definitions (as long as they have unique names).
``PYBIND11_EMBEDDED_MODULE`` definitions (as long as they have unique names).
These modules are added to Python's list of builtins, so they can also be
imported in pure Python files loaded by the interpreter. Everything interacts
@ -196,7 +197,7 @@ naturally:
int main() {
py::scoped_interpreter guard{};
auto py_module = py::module::import("py_module");
auto py_module = py::module_::import("py_module");
auto locals = py::dict("fmt"_a="{} + {} = {}", **py_module.attr("__dict__"));
assert(locals["a"].cast<int>() == 1);
@ -215,9 +216,9 @@ naturally:
Interpreter lifetime
====================
The Python interpreter shuts down when `scoped_interpreter` is destroyed. After
The Python interpreter shuts down when ``scoped_interpreter`` is destroyed. After
this, creating a new instance will restart the interpreter. Alternatively, the
`initialize_interpreter` / `finalize_interpreter` pair of functions can be used
``initialize_interpreter`` / ``finalize_interpreter`` pair of functions can be used
to directly set the state at any time.
Modules created with pybind11 can be safely re-initialized after the interpreter
@ -229,8 +230,8 @@ global data. All the details can be found in the CPython documentation.
.. warning::
Creating two concurrent `scoped_interpreter` guards is a fatal error. So is
calling `initialize_interpreter` for a second time after the interpreter
Creating two concurrent ``scoped_interpreter`` guards is a fatal error. So is
calling ``initialize_interpreter`` for a second time after the interpreter
has already been initialized.
Do not use the raw CPython API functions ``Py_Initialize`` and
@ -241,7 +242,7 @@ global data. All the details can be found in the CPython documentation.
Sub-interpreter support
=======================
Creating multiple copies of `scoped_interpreter` is not possible because it
Creating multiple copies of ``scoped_interpreter`` is not possible because it
represents the main Python interpreter. Sub-interpreters are something different
and they do permit the existence of multiple interpreters. This is an advanced
feature of the CPython API and should be handled with care. pybind11 does not
@ -257,5 +258,5 @@ We'll just mention a couple of caveats the sub-interpreters support in pybind11:
2. Managing multiple threads, multiple interpreters and the GIL can be
challenging and there are several caveats here, even within the pure
CPython API (please refer to the Python docs for details). As for
pybind11, keep in mind that `gil_scoped_release` and `gil_scoped_acquire`
pybind11, keep in mind that ``gil_scoped_release`` and ``gil_scoped_acquire``
do not take sub-interpreters into account.

View File

@ -43,18 +43,28 @@ at its exception handler.
| | of bounds access in ``__getitem__``, |
| | ``__setitem__``, etc.) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::value_error` | ``ValueError`` (used to indicate |
| | wrong value passed in |
| | ``container.remove(...)``) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::key_error` | ``KeyError`` (used to indicate out |
| | of bounds access in ``__getitem__``, |
| | ``__setitem__`` in dict-like |
| | objects, etc.) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::value_error` | ``ValueError`` (used to indicate |
| | wrong value passed in |
| | ``container.remove(...)``) |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::type_error` | ``TypeError`` |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::buffer_error` | ``BufferError`` |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::import_error` | ``ImportError`` |
+--------------------------------------+--------------------------------------+
| :class:`pybind11::attribute_error` | ``AttributeError`` |
+--------------------------------------+--------------------------------------+
| Any other exception | ``RuntimeError`` |
+--------------------------------------+--------------------------------------+
Exception translation is not bidirectional. That is, *catching* the C++
exceptions defined above above will not trap exceptions that originate from
exceptions defined above will not trap exceptions that originate from
Python. For that, catch :class:`pybind11::error_already_set`. See :ref:`below
<handling_python_exceptions_cpp>` for further details.
@ -67,9 +77,10 @@ Registering custom translators
If the default exception conversion policy described above is insufficient,
pybind11 also provides support for registering custom exception translators.
To register a simple exception conversion that translates a C++ exception into
a new Python exception using the C++ exception's ``what()`` method, a helper
function is available:
Similar to pybind11 classes, exception translators can be local to the module
they are defined in or global to the entire python session. To register a simple
exception conversion that translates a C++ exception into a new Python exception
using the C++ exception's ``what()`` method, a helper function is available:
.. code-block:: cpp
@ -79,29 +90,39 @@ This call creates a Python exception class with the name ``PyExp`` in the given
module and automatically converts any encountered exceptions of type ``CppExp``
into Python exceptions of type ``PyExp``.
A matching function is available for registering a local exception translator:
.. code-block:: cpp
py::register_local_exception<CppExp>(module, "PyExp");
It is possible to specify a base class for the exception using the third
parameter, a `handle`:
parameter, a ``handle``:
.. code-block:: cpp
py::register_exception<CppExp>(module, "PyExp", PyExc_RuntimeError);
py::register_local_exception<CppExp>(module, "PyExp", PyExc_RuntimeError);
Then `PyExp` can be caught both as `PyExp` and `RuntimeError`.
Then ``PyExp`` can be caught both as ``PyExp`` and ``RuntimeError``.
The class objects of the built-in Python exceptions are listed in the Python
documentation on `Standard Exceptions <https://docs.python.org/3/c-api/exceptions.html#standard-exceptions>`_.
The default base class is `PyExc_Exception`.
The default base class is ``PyExc_Exception``.
When more advanced exception translation is needed, the function
``py::register_exception_translator(translator)`` can be used to register
When more advanced exception translation is needed, the functions
``py::register_exception_translator(translator)`` and
``py::register_local_exception_translator(translator)`` can be used to register
functions that can translate arbitrary exception types (and which may include
additional logic to do so). The function takes a stateless callable (e.g. a
additional logic to do so). The functions take a stateless callable (e.g. a
function pointer or a lambda function without captured variables) with the call
signature ``void(std::exception_ptr)``.
When a C++ exception is thrown, the registered exception translators are tried
in reverse order of registration (i.e. the last registered translator gets the
first shot at handling the exception).
first shot at handling the exception). All local translators will be tried
before a global translator is tried.
Inside the translator, ``std::rethrow_exception`` should be used within
a try block to re-throw the exception. One or more catch clauses to catch
@ -156,6 +177,57 @@ section.
may be explicitly (re-)thrown to delegate it to the other,
previously-declared existing exception translators.
Note that ``libc++`` and ``libstdc++`` `behave differently <https://stackoverflow.com/questions/19496643/using-clang-fvisibility-hidden-and-typeinfo-and-type-erasure/28827430>`_
with ``-fvisibility=hidden``. Therefore exceptions that are used across ABI boundaries need to be explicitly exported, as exercised in ``tests/test_exceptions.h``.
See also: "Problems with C++ exceptions" under `GCC Wiki <https://gcc.gnu.org/wiki/Visibility>`_.
Local vs Global Exception Translators
=====================================
When a global exception translator is registered, it will be applied across all
modules in the reverse order of registration. This can create behavior where the
order of module import influences how exceptions are translated.
If module1 has the following translator:
.. code-block:: cpp
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) std::rethrow_exception(p);
} catch (const std::invalid_argument &e) {
PyErr_SetString("module1 handled this")
}
}
and module2 has the following similar translator:
.. code-block:: cpp
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) std::rethrow_exception(p);
} catch (const std::invalid_argument &e) {
PyErr_SetString("module2 handled this")
}
}
then which translator handles the invalid_argument will be determined by the
order that module1 and module2 are imported. Since exception translators are
applied in the reverse order of registration, whichever module was imported
last will "win" and that translator will be applied.
If there are multiple pybind11 modules that share exception types (either
standard built-in or custom) loaded into a single python instance and
consistent error handling behavior is needed, then local translators should be
used.
Changing the previous example to use ``register_local_exception_translator``
would mean that when invalid_argument is thrown in the module2 code, the
module2 translator will always handle it, while in module1, the module1
translator will do the same.
.. _handling_python_exceptions_cpp:
Handling exceptions from Python in C++
@ -182,13 +254,13 @@ For example:
try {
// open("missing.txt", "r")
auto file = py::module::import("io").attr("open")("missing.txt", "r");
auto file = py::module_::import("io").attr("open")("missing.txt", "r");
auto text = file.attr("read")();
file.attr("close")();
} catch (py::error_already_set &e) {
if (e.matches(PyExc_FileNotFoundError)) {
py::print("missing.txt not found");
} else if (e.match(PyExc_PermissionError)) {
} else if (e.matches(PyExc_PermissionError)) {
py::print("missing.txt found but not accessible");
} else {
throw;
@ -253,6 +325,34 @@ Alternately, to ignore the error, call `PyErr_Clear
Any Python error must be thrown or cleared, or Python/pybind11 will be left in
an invalid state.
Chaining exceptions ('raise from')
==================================
In Python 3.3 a mechanism for indicating that exceptions were caused by other
exceptions was introduced:
.. code-block:: py
try:
print(1 / 0)
except Exception as exc:
raise RuntimeError("could not divide by zero") from exc
To do a similar thing in pybind11, you can use the ``py::raise_from`` function. It
sets the current python error indicator, so to continue propagating the exception
you should ``throw py::error_already_set()`` (Python 3 only).
.. code-block:: cpp
try {
py::eval("print(1 / 0"));
} catch (py::error_already_set &e) {
py::raise_from(e, PyExc_RuntimeError, "could not divide by zero");
throw py::error_already_set();
}
.. versionadded:: 2.8
.. _unraisable_exceptions:
Handling unraisable exceptions

View File

@ -17,7 +17,7 @@ bindings for functions that return a non-trivial type. Just by looking at the
type information, it is not clear whether Python should take charge of the
returned value and eventually free its resources, or if this is handled on the
C++ side. For this reason, pybind11 provides a several *return value policy*
annotations that can be passed to the :func:`module::def` and
annotations that can be passed to the :func:`module_::def` and
:func:`class_::def` functions. The default policy is
:enum:`return_value_policy::automatic`.
@ -50,7 +50,7 @@ implied transfer of ownership, i.e.:
.. code-block:: cpp
m.def("get_data", &get_data, return_value_policy::reference);
m.def("get_data", &get_data, py::return_value_policy::reference);
On the other hand, this is not the right policy for many other situations,
where ignoring ownership could lead to resource leaks.
@ -90,17 +90,18 @@ The following table provides an overview of available policies:
| | return value is referenced by Python. This is the default policy for |
| | property getters created via ``def_property``, ``def_readwrite``, etc. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic` | **Default policy.** This policy falls back to the policy |
| :enum:`return_value_policy::automatic` | This policy falls back to the policy |
| | :enum:`return_value_policy::take_ownership` when the return value is a |
| | pointer. Otherwise, it uses :enum:`return_value_policy::move` or |
| | :enum:`return_value_policy::copy` for rvalue and lvalue references, |
| | respectively. See above for a description of what all of these different |
| | policies do. |
| | policies do. This is the default policy for ``py::class_``-wrapped types. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic_reference` | As above, but use policy :enum:`return_value_policy::reference` when the |
| | return value is a pointer. This is the default conversion policy for |
| | function arguments when calling Python functions manually from C++ code |
| | (i.e. via handle::operator()). You probably won't need to use this. |
| | (i.e. via ``handle::operator()``) and the casters in ``pybind11/stl.h``. |
| | You probably won't need to use this explicitly. |
+--------------------------------------------------+----------------------------------------------------------------------------+
Return value policies can also be applied to properties:
@ -119,7 +120,7 @@ targeted arguments can be passed through the :class:`cpp_function` constructor:
.. code-block:: cpp
class_<MyClass>(m, "MyClass")
.def_property("data"
.def_property("data",
py::cpp_function(&MyClass::getData, py::return_value_policy::copy),
py::cpp_function(&MyClass::setData)
);
@ -182,6 +183,9 @@ relies on the ability to create a *weak reference* to the nurse object. When
the nurse object is not a pybind11-registered type and does not support weak
references, an exception will be thrown.
If you use an incorrect argument index, you will get a ``RuntimeError`` saying
``Could not activate keep_alive!``. You should review the indices you're using.
Consider the following example: here, the binding code for a list append
operation ties the lifetime of the newly added element to the underlying
container:
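A sketch consistent with that description (the ``List`` class is hypothetical): ``py::keep_alive<1, 2>()`` keeps the appended element (implicit argument 2) alive at least as long as the list itself (implicit argument 1, i.e. ``self``).
.. code-block:: cpp
py::class_<List>(m, "List")
    .def("append", &List::append, py::keep_alive<1, 2>());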
@ -228,7 +232,7 @@ is equivalent to the following pseudocode:
});
The only requirement is that ``T`` is default-constructible, but otherwise any
scope guard will work. This is very useful in combination with `gil_scoped_release`.
scope guard will work. This is very useful in combination with ``gil_scoped_release``.
See :ref:`gil`.
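For example, a brief sketch of releasing the GIL around a long-running call via ``call_guard`` (the function ``heavy_compute`` is hypothetical):
.. code-block:: cpp
// Equivalent to wrapping the call body in a gil_scoped_release scope guard.
m.def("heavy_compute", &heavy_compute, py::call_guard<py::gil_scoped_release>());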
Multiple guards can also be specified as ``py::call_guard<T1, T2, T3...>``. The
@ -251,7 +255,7 @@ For instance, the following statement iterates over a Python ``dict``:
.. code-block:: cpp
void print_dict(py::dict dict) {
void print_dict(const py::dict& dict) {
/* Easily interact with Python types */
for (auto item : dict)
std::cout << "key=" << std::string(py::str(item.first)) << ", "
@ -268,7 +272,7 @@ And used in Python as usual:
.. code-block:: pycon
>>> print_dict({'foo': 123, 'bar': 'hello'})
>>> print_dict({"foo": 123, "bar": "hello"})
key=foo, value=123
key=bar, value=hello
@ -289,7 +293,7 @@ Such functions can also be created using pybind11:
.. code-block:: cpp
void generic(py::args args, py::kwargs kwargs) {
void generic(py::args args, const py::kwargs& kwargs) {
/// .. do something with args
if (kwargs)
/// .. do something with kwargs
@ -302,8 +306,9 @@ The class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives
from ``py::dict``.
You may also use just one or the other, and may combine these with other
arguments as long as the ``py::args`` and ``py::kwargs`` arguments are the last
arguments accepted by the function.
arguments. Note, however, that ``py::kwargs`` must always be the last argument
of the function, and ``py::args`` implies that any further arguments are
keyword-only (see :ref:`keyword_only_arguments`).
Please refer to the other examples for details on how to iterate over these,
and on how to cast their entries into C++ objects. A demonstration is also
@ -362,6 +367,8 @@ like so:
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg("arg") = static_cast<SomeType *>(nullptr));
.. _keyword_only_arguments:
Keyword-only arguments
======================
@ -373,10 +380,11 @@ argument in a function definition:
def f(a, *, b): # a can be positional or via keyword; b must be via keyword
pass
f(a=1, b=2) # good
f(b=2, a=1) # good
f(1, b=2) # good
f(1, 2) # TypeError: f() takes 1 positional argument but 2 were given
f(1, b=2) # good
f(1, 2) # TypeError: f() takes 1 positional argument but 2 were given
Pybind11 provides a ``py::kw_only`` object that allows you to implement
the same behaviour by specifying the object between positional and keyword-only
@ -392,6 +400,15 @@ feature does *not* require Python 3 to work.
.. versionadded:: 2.6
As of pybind11 2.9, a ``py::args`` argument implies that any following arguments
are keyword-only, as if ``py::kw_only()`` had been specified in the same
relative location of the argument list as the ``py::args`` argument. The
``py::kw_only()`` may be included to be explicit about this, but is not
required. (Prior to 2.9 ``py::args`` may only occur at the end of the argument
list, or immediately before a ``py::kwargs`` argument at the end).
.. versionadded:: 2.9
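A brief sketch of ``py::kw_only`` in a definition (the function ``f`` is hypothetical; ``b`` can then only be passed by keyword):
.. code-block:: cpp
m.def("f", [](int a, int b) { return a - b; },
      py::arg("a"), py::kw_only(), py::arg("b"));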
Positional-only arguments
=========================
@ -524,6 +541,8 @@ The default behaviour when the tag is unspecified is to allow ``None``.
not allow ``None`` as argument. To pass optional argument of these copied types consider
using ``std::optional<T>``
.. _overload_resolution:
Overload resolution order
=========================
@ -540,11 +559,13 @@ an explicit ``py::arg().noconvert()`` attribute in the function definition).
If the second pass also fails a ``TypeError`` is raised.
Within each pass, overloads are tried in the order they were registered with
pybind11.
pybind11. If the ``py::prepend()`` tag is added to the definition, a function
can be placed at the beginning of the overload sequence instead, allowing user
overloads to precede built-in functions.
What this means in practice is that pybind11 will prefer any overload that does
not require conversion of arguments to an overload that does, but otherwise prefers
earlier-defined overloads to later-defined ones.
not require conversion of arguments to an overload that does, but otherwise
prefers earlier-defined overloads to later-defined ones.
.. note::
@ -553,3 +574,42 @@ earlier-defined overloads to later-defined ones.
requiring one conversion over one requiring three, but only prioritizes
overloads requiring no conversion at all to overloads that require
conversion of at least one argument.
.. versionadded:: 2.6
The ``py::prepend()`` tag.
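As a sketch of the tag's use (function names are hypothetical; the second definition is registered later but tried first):
.. code-block:: cpp
m.def("blend", &generic_blend);
m.def("blend", &specialized_blend, py::prepend());  // placed at the front of the overload chain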
Binding functions with template parameters
==========================================
You can bind functions that have template parameters. Here's a function:
.. code-block:: cpp
template <typename T>
void set(T t);
C++ templates cannot be instantiated at runtime, so you cannot bind the
non-instantiated function:
.. code-block:: cpp
// BROKEN (this will not compile)
m.def("set", &set);
You must bind each instantiated function template separately. You may bind
each instantiation with the same name, which will be treated the same as
an overloaded function:
.. code-block:: cpp
m.def("set", &set<int>);
m.def("set", &set<std::string>);
Sometimes it's more clear to bind them with separate names, which is also
an option:
.. code-block:: cpp
m.def("setInt", &set<int>);
m.def("setString", &set<std::string>);

View File

@ -84,7 +84,7 @@ could be realized as follows (important changes highlighted):
});
}
The ``call_go`` wrapper can also be simplified using the `call_guard` policy
The ``call_go`` wrapper can also be simplified using the ``call_guard`` policy
(see :ref:`call_policies`) which yields the same result:
.. code-block:: cpp
@ -132,7 +132,7 @@ However, it can be acquired as follows:
.. code-block:: cpp
py::object pet = (py::object) py::module::import("basic").attr("Pet");
py::object pet = (py::object) py::module_::import("basic").attr("Pet");
py::class_<Dog>(m, "Dog", pet)
.def(py::init<const std::string &>())
@ -146,7 +146,7 @@ has been executed:
.. code-block:: cpp
py::module::import("basic");
py::module_::import("basic");
py::class_<Dog, Pet>(m, "Dog")
.def(py::init<const std::string &>())
@ -223,7 +223,7 @@ avoids this issue involves weak reference with a cleanup callback:
.. code-block:: cpp
// Register a callback function that is invoked when the BaseClass object is colelcted
// Register a callback function that is invoked when the BaseClass object is collected
py::cpp_function cleanup_callback(
[](py::handle weakref) {
// perform cleanup here -- this function is called with the GIL held
@ -237,13 +237,13 @@ avoids this issue involves weak reference with a cleanup callback:
.. note::
PyPy (at least version 5.9) does not garbage collect objects when the
interpreter exits. An alternative approach (which also works on CPython) is to use
the :py:mod:`atexit` module [#f7]_, for example:
PyPy does not garbage collect objects when the interpreter exits. An alternative
approach (which also works on CPython) is to use the :py:mod:`atexit` module [#f7]_,
for example:
.. code-block:: cpp
auto atexit = py::module::import("atexit");
auto atexit = py::module_::import("atexit");
atexit.attr("register")(py::cpp_function([]() {
// perform cleanup here -- this function is called with the GIL held
}));
@ -284,7 +284,7 @@ work, it is important that all lines are indented consistently, i.e.:
)mydelimiter");
By default, pybind11 automatically generates and prepends a signature to the docstring of a function
registered with ``module::def()`` and ``class_::def()``. Sometimes this
registered with ``module_::def()`` and ``class_::def()``. Sometimes this
behavior is not desirable, because you want to provide your own signature or remove
the docstring completely to exclude the function from the Sphinx documentation.
The class ``options`` allows you to selectively suppress auto-generated signatures:
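A minimal sketch of such suppression (module and function names are illustrative; the full documentation shows the complete example):
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
py::options options;
options.disable_function_signatures(); // keep only the hand-written docstring
m.def("add", [](int i, int j) { return i + j; },
"Adds two numbers, documented without the auto-generated signature.");
}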

View File

@ -57,11 +57,11 @@ specification.
struct buffer_info {
void *ptr;
ssize_t itemsize;
py::ssize_t itemsize;
std::string format;
ssize_t ndim;
std::vector<ssize_t> shape;
std::vector<ssize_t> strides;
py::ssize_t ndim;
std::vector<py::ssize_t> shape;
std::vector<py::ssize_t> strides;
};
To create a C++ function that can take a Python buffer object as an argument,
@ -150,8 +150,10 @@ NumPy array containing double precision values.
When it is invoked with a different type (e.g. an integer or a list of
integers), the binding code will attempt to cast the input into a NumPy array
of the requested type. Note that this feature requires the
:file:`pybind11/numpy.h` header to be included.
of the requested type. This feature requires the :file:`pybind11/numpy.h`
header to be included. Note that :file:`pybind11/numpy.h` does not depend on
the NumPy headers, and thus can be used without declaring a build-time
dependency on NumPy; NumPy>=1.7.0 is a runtime dependency.
Data in NumPy arrays is not guaranteed to be packed in a dense manner;
furthermore, entries can be separated by arbitrary column and row strides.
@ -169,6 +171,31 @@ template parameter, and it ensures that non-conforming arguments are converted
into an array satisfying the specified requirements instead of trying the next
function overload.
There are several methods on arrays; the methods listed below under references
work, as well as the following functions based on the NumPy API:
- ``.dtype()`` returns the type of the contained values.
- ``.strides()`` returns a pointer to the strides of the array (optionally pass
an integer axis to get a number).
- ``.flags()`` returns the flag settings. ``.writable()`` and ``.owndata()``
are directly available.
- ``.offset_at()`` returns the offset (optionally pass indices).
- ``.squeeze()`` returns a view with length-1 axes removed.
- ``.view(dtype)`` returns a view of the array with a different dtype.
- ``.reshape({i, j, ...})`` returns a view of the array with a different shape.
``.resize({...})`` is also available.
- ``.index_at(i, j, ...)`` gets the count from the beginning to a given index.
There are also several methods for getting references (described below).
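A minimal sketch exercising a few of the functions listed above (the bound function name is illustrative):
.. code-block:: cpp
m.def("describe", [](py::array a) {
py::print("dtype:", a.dtype()); // NumPy dtype object of the elements
py::print("size:", a.size()); // total number of elements
py::array flat = a.squeeze().reshape({a.size()}); // 1-D view of the same data
return flat;
});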
Structured types
================
@ -231,8 +258,8 @@ by the compiler. The result is returned as a NumPy array of type
.. code-block:: pycon
>>> x = np.array([[1, 3],[5, 7]])
>>> y = np.array([[2, 4],[6, 8]])
>>> x = np.array([[1, 3], [5, 7]])
>>> y = np.array([[2, 4], [6, 8]])
>>> z = 3
>>> result = vectorized_func(x, y, z)
@ -309,17 +336,17 @@ where ``N`` gives the required dimensionality of the array:
m.def("sum_3d", [](py::array_t<double> x) {
auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable
double sum = 0;
for (ssize_t i = 0; i < r.shape(0); i++)
for (ssize_t j = 0; j < r.shape(1); j++)
for (ssize_t k = 0; k < r.shape(2); k++)
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
sum += r(i, j, k);
return sum;
});
m.def("increment_3d", [](py::array_t<double> x) {
auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false
for (ssize_t i = 0; i < r.shape(0); i++)
for (ssize_t j = 0; j < r.shape(1); j++)
for (ssize_t k = 0; k < r.shape(2); k++)
for (py::ssize_t i = 0; i < r.shape(0); i++)
for (py::ssize_t j = 0; j < r.shape(1); j++)
for (py::ssize_t k = 0; k < r.shape(2); k++)
r(i, j, k) += 1.0;
}, py::arg().noconvert());
@ -343,21 +370,21 @@ The returned proxy object supports some of the same methods as ``py::array`` so
that it can be used as a drop-in replacement for some existing, index-checked
uses of ``py::array``:
- ``r.ndim()`` returns the number of dimensions
- ``.ndim()`` returns the number of dimensions
- ``r.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)``` returns a pointer to
- ``.data(1, 2, ...)`` and ``.mutable_data(1, 2, ...)`` return a pointer to
the ``const T`` or ``T`` data, respectively, at the given indices. The
latter is only available to proxies obtained via ``a.mutable_unchecked()``.
- ``itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``.
- ``ndim()`` returns the number of dimensions.
- ``.ndim()`` returns the number of dimensions.
- ``shape(n)`` returns the size of dimension ``n``
- ``.shape(n)`` returns the size of dimension ``n``
- ``size()`` returns the total number of elements (i.e. the product of the shapes).
- ``.size()`` returns the total number of elements (i.e. the product of the shapes).
- ``nbytes()`` returns the number of bytes used by the referenced elements
- ``.nbytes()`` returns the number of bytes used by the referenced elements
(i.e. ``itemsize()`` times ``size()``).
.. seealso::
@ -376,7 +403,7 @@ In Python 2, the syntactic sugar ``...`` is not available, but the singleton
.. code-block:: python
a = # a NumPy array
a = ... # a NumPy array
b = a[0, ..., 0]
The function ``py::ellipsis()`` function can be used to perform the same
@ -388,7 +415,7 @@ operation on the C++ side:
py::array b = a[py::make_tuple(0, py::ellipsis(), 0)];
.. versionchanged:: 2.6
``py::ellipsis()`` is now also avaliable in Python 2.
``py::ellipsis()`` is now also available in Python 2.
Memory view
===========

View File

@ -20,6 +20,40 @@ Available types include :class:`handle`, :class:`object`, :class:`bool_`,
Be sure to review the :ref:`pytypes_gotchas` before using this heavily in
your C++ API.
.. _instantiating_compound_types:
Instantiating compound Python types from C++
============================================
Dictionaries can be initialized in the :class:`dict` constructor:
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
py::dict d("spam"_a=py::none(), "eggs"_a=42);
A tuple of python objects can be instantiated using :func:`py::make_tuple`:
.. code-block:: cpp
py::tuple tup = py::make_tuple(42, py::none(), "spam");
Each element is converted to a supported Python type.
A `simple namespace`_ can be instantiated using
.. code-block:: cpp
using namespace pybind11::literals; // to bring in the `_a` literal
py::object SimpleNamespace = py::module_::import("types").attr("SimpleNamespace");
py::object ns = SimpleNamespace("spam"_a=py::none(), "eggs"_a=42);
Attributes on a namespace can be modified with the :func:`py::delattr`,
:func:`py::getattr`, and :func:`py::setattr` functions. Simple namespaces can
be useful as lightweight stand-ins for class instances.
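For instance, continuing the namespace sketch above (the new attribute value is illustrative):
.. code-block:: cpp
py::setattr(ns, "spam", py::int_(7)); // ns.spam = 7
py::object eggs = py::getattr(ns, "eggs"); // read ns.eggs (42)
py::delattr(ns, "eggs"); // del ns.eggs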
.. _simple namespace: https://docs.python.org/3/library/types.html#types.SimpleNamespace
.. _casting_back_and_forth:
Casting back and forth
@ -30,7 +64,7 @@ types to Python, which can be done using :func:`py::cast`:
.. code-block:: cpp
MyClass *cls = ..;
MyClass *cls = ...;
py::object obj = py::cast(cls);
The reverse direction uses the following syntax:
@ -56,12 +90,12 @@ This example obtains a reference to the Python ``Decimal`` class.
.. code-block:: cpp
// Equivalent to "from decimal import Decimal"
py::object Decimal = py::module::import("decimal").attr("Decimal");
py::object Decimal = py::module_::import("decimal").attr("Decimal");
.. code-block:: cpp
// Try to import scipy
py::object scipy = py::module::import("scipy");
py::object scipy = py::module_::import("scipy");
return scipy.attr("__version__");
@ -81,7 +115,7 @@ via ``operator()``.
.. code-block:: cpp
// Use Python to make our directories
py::object os = py::module::import("os");
py::object os = py::module_::import("os");
py::object makedirs = os.attr("makedirs");
makedirs("/tmp/path/to/somewhere");
@ -132,6 +166,7 @@ Keyword arguments are also supported. In Python, there is the usual call syntax:
def f(number, say, to):
... # function code
f(1234, say="hello", to=some_instance) # keyword call in Python
In C++, the same call can be made using:
@ -196,9 +231,9 @@ C++ functions that require a specific subtype rather than a generic :class:`obje
#include <pybind11/numpy.h>
using namespace pybind11::literals;
py::module os = py::module::import("os");
py::module path = py::module::import("os.path"); // like 'import os.path as path'
py::module np = py::module::import("numpy"); // like 'import numpy as np'
py::module_ os = py::module_::import("os");
py::module_ path = py::module_::import("os.path"); // like 'import os.path as path'
py::module_ np = py::module_::import("numpy"); // like 'import numpy as np'
py::str curdir_abs = path.attr("abspath")(path.attr("curdir"));
py::print(py::str("Current directory: ") + curdir_abs);

View File

@ -28,7 +28,7 @@ Capturing standard output from ostream
Often, a library will use the streams ``std::cout`` and ``std::cerr`` to print,
but this does not play well with Python's standard ``sys.stdout`` and ``sys.stderr``
redirection. Replacing a library's printing with `py::print <print>` may not
redirection. Replacing a library's printing with ``py::print <print>`` may not
be feasible. This can be fixed using a guard around the library function that
redirects output to the corresponding Python streams:
@ -42,20 +42,31 @@ redirects output to the corresponding Python streams:
m.def("noisy_func", []() {
py::scoped_ostream_redirect stream(
std::cout, // std::ostream&
py::module::import("sys").attr("stdout") // Python output
py::module_::import("sys").attr("stdout") // Python output
);
call_noisy_func();
});
.. warning::
The implementation in ``pybind11/iostream.h`` is NOT thread safe. Multiple
threads writing to a redirected ostream concurrently cause data races
and potentially buffer overflows. Therefore it is currently a requirement
that all (possibly) concurrent redirected ostream writes are protected by
a mutex. #HelpAppreciated: Work on iostream.h thread safety. For more
background see the discussions under
`PR #2982 <https://github.com/pybind/pybind11/pull/2982>`_ and
`PR #2995 <https://github.com/pybind/pybind11/pull/2995>`_.
This method respects flushes on the output streams and will flush if needed
when the scoped guard is destroyed. This allows the output to be redirected in
real time, such as to a Jupyter notebook. The two arguments, the C++ stream and
the Python output, are optional, and default to standard output if not given. An
extra type, `py::scoped_estream_redirect <scoped_estream_redirect>`, is identical
extra type, ``py::scoped_estream_redirect <scoped_estream_redirect>``, is identical
except for defaulting to ``std::cerr`` and ``sys.stderr``; this can be useful with
`py::call_guard`, which allows multiple items, but uses the default constructor:
``py::call_guard``, which allows multiple items, but uses the default constructor:
.. code-block:: py
.. code-block:: cpp
// Alternative: Call single function using call guard
m.def("noisy_func", &call_noisy_function,
@ -63,7 +74,7 @@ except for defaulting to ``std::cerr`` and ``sys.stderr``; this can be useful wi
py::scoped_estream_redirect>());
The redirection can also be done in Python with the addition of a context
manager, using the `py::add_ostream_redirect() <add_ostream_redirect>` function:
manager, using the ``py::add_ostream_redirect() <add_ostream_redirect>`` function:
.. code-block:: cpp
@ -92,7 +103,7 @@ arguments to disable one of the streams if needed.
Evaluating Python expressions from strings and files
====================================================
pybind11 provides the `eval`, `exec` and `eval_file` functions to evaluate
pybind11 provides the ``eval``, ``exec`` and ``eval_file`` functions to evaluate
Python expressions and statements. The following example illustrates how they
can be used.
@ -104,7 +115,7 @@ can be used.
...
// Evaluate in scope of main module
py::object scope = py::module::import("__main__").attr("__dict__");
py::object scope = py::module_::import("__main__").attr("__dict__");
// Evaluate an isolated expression
int result = py::eval("my_variable + 10", scope).cast<int>();

View File

@ -77,6 +77,7 @@ segmentation fault).
.. code-block:: python
from example import Parent
print(Parent().get_child())
The problem is that ``Parent::get_child()`` returns a pointer to an instance of

View File

@ -39,7 +39,7 @@ on various C++11 language features that break older versions of Visual Studio.
To use the C++17 in Visual Studio 2017 (MSVC 14.1), pybind11 requires the flag
``/permissive-`` to be passed to the compiler `to enforce standard conformance`_. When
building with Visual Studio 2019, this is not strictly necessary, but still adviced.
building with Visual Studio 2019, this is not strictly necessary, but still advised.
.. _`to enforce standard conformance`: https://docs.microsoft.com/en-us/cpp/build/reference/permissive-standards-conformance?view=vs-2017
@ -109,7 +109,7 @@ a file named :file:`example.cpp` with the following contents:
PYBIND11_MODULE(example, m) {
m.doc() = "pybind11 example plugin"; // optional module docstring
m.def("add", &add, "A function which adds two numbers");
m.def("add", &add, "A function that adds two numbers");
}
.. [#f1] In practice, implementation and binding code will generally be located
@ -118,8 +118,8 @@ a file named :file:`example.cpp` with the following contents:
The :func:`PYBIND11_MODULE` macro creates a function that will be called when an
``import`` statement is issued from within Python. The module name (``example``)
is given as the first macro argument (it should not be in quotes). The second
argument (``m``) defines a variable of type :class:`py::module <module>` which
is the main interface for creating bindings. The method :func:`module::def`
argument (``m``) defines a variable of type :class:`py::module_ <module>` which
is the main interface for creating bindings. The method :func:`module_::def`
generates binding code that exposes the ``add()`` function to Python.
.. note::
@ -136,7 +136,14 @@ On Linux, the above example can be compiled using the following command:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
$ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
.. note::
If you used :ref:`include_as_a_submodule` to get the pybind11 source, then
use ``$(python3-config --includes) -Iextern/pybind11/include`` instead of
``$(python3 -m pybind11 --includes)`` in the above compilation, as
explained in :ref:`building_manually`.
For more details on the required compiler flags on Linux and macOS, see
:ref:`building_manually`. For complete cross-platform compilation instructions,
@ -181,7 +188,7 @@ names of the arguments ("i" and "j" in this case).
py::arg("i"), py::arg("j"));
:class:`arg` is one of several special tag classes which can be used to pass
metadata into :func:`module::def`. With this modified binding code, we can now
metadata into :func:`module_::def`. With this modified binding code, we can now
call the function using keyword arguments, which is a more readable alternative
particularly for functions taking many parameters:

View File

@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
import random
import os
import time
import datetime as dt
import os
import random
nfns = 4 # Functions per class
nargs = 4 # Arguments per function
@ -14,7 +13,7 @@ def generate_dummy_code_pybind11(nclasses=10):
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += '\n'
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
@ -22,18 +21,17 @@ def generate_dummy_code_pybind11(nclasses=10):
bindings += ' py::class_<cl%03i>(m, "cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % \
(fn, cl, fn)
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i)\n' % (fn, cl, fn)
decl += "};\n\n"
bindings += ' ;\n'
bindings += " ;\n"
result = "#include <pybind11/pybind11.h>\n\n"
result += "namespace py = pybind11;\n\n"
result += decl + '\n'
result += decl + "\n"
result += "PYBIND11_MODULE(example, m) {\n"
result += bindings
result += "}"
@ -46,7 +44,7 @@ def generate_dummy_code_boost(nclasses=10):
for cl in range(nclasses):
decl += "class cl%03i;\n" % cl
decl += '\n'
decl += "\n"
for cl in range(nclasses):
decl += "class cl%03i {\n" % cl
@ -54,18 +52,20 @@ def generate_dummy_code_boost(nclasses=10):
bindings += ' py::class_<cl%03i>("cl%03i")\n' % (cl, cl)
for fn in range(nfns):
ret = random.randint(0, nclasses - 1)
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
params = [random.randint(0, nclasses - 1) for i in range(nargs)]
decl += " cl%03i *fn_%03i(" % (ret, fn)
decl += ", ".join("cl%03i *" % p for p in params)
decl += ");\n"
bindings += ' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n' % \
(fn, cl, fn)
bindings += (
' .def("fn_%03i", &cl%03i::fn_%03i, py::return_value_policy<py::manage_new_object>())\n'
% (fn, cl, fn)
)
decl += "};\n\n"
bindings += ' ;\n'
bindings += " ;\n"
result = "#include <boost/python.hpp>\n\n"
result += "namespace py = boost::python;\n\n"
result += decl + '\n'
result += decl + "\n"
result += "BOOST_PYTHON_MODULE(example) {\n"
result += bindings
result += "}"
@ -73,17 +73,19 @@ def generate_dummy_code_boost(nclasses=10):
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
print ("{")
print("{")
for i in range(0, 10):
nclasses = 2 ** i
with open("test.cpp", "w") as f:
f.write(codegen(nclasses))
n1 = dt.datetime.now()
os.system("g++ -Os -shared -rdynamic -undefined dynamic_lookup "
os.system(
"g++ -Os -shared -rdynamic -undefined dynamic_lookup "
"-fvisibility=hidden -std=c++14 test.cpp -I include "
"-I /System/Library/Frameworks/Python.framework/Headers -o test.so")
"-I /System/Library/Frameworks/Python.framework/Headers -o test.so"
)
n2 = dt.datetime.now()
elapsed = (n2 - n1).total_seconds()
size = os.stat('test.so').st_size
size = os.stat("test.so").st_size
print(" {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
print ("}")
print("}")

View File

@ -6,21 +6,697 @@ Changelog
Starting with version 1.8.0, pybind11 releases use a `semantic versioning
<http://semver.org>`_ policy.
v2.6.0 (IN PROGRESS)
Version 2.9.1 (Feb 2, 2022)
---------------------------
Changes:
* If possible, attach Python exception with ``py::raise_from`` to ``TypeError``
when casting from C++ to Python. This will give additional info if Python
exceptions occur in the caster. Adds a test case of trying to convert a set
from C++ to Python when the hash function is not defined in Python.
`#3605 <https://github.com/pybind/pybind11/pull/3605>`_
* Add a mapping of C++11 nested exceptions to their Python exception
equivalent using ``py::raise_from``. This attaches the nested exceptions in
Python using the ``__cause__`` field.
`#3608 <https://github.com/pybind/pybind11/pull/3608>`_
* Propagate Python exception traceback using ``raise_from`` if a pybind11
function runs out of overloads.
`#3671 <https://github.com/pybind/pybind11/pull/3671>`_
* ``py::multiple_inheritance`` is now only needed when C++ bases are hidden
from pybind11.
`#3650 <https://github.com/pybind/pybind11/pull/3650>`_ and
`#3659 <https://github.com/pybind/pybind11/pull/3659>`_
Bug fixes:
* Remove a boolean cast in ``numpy.h`` that causes MSVC C4800 warnings when
compiling against Python 3.10 or newer.
`#3669 <https://github.com/pybind/pybind11/pull/3669>`_
* Render ``py::bool_`` and ``py::float_`` as ``bool`` and ``float``
respectively.
`#3622 <https://github.com/pybind/pybind11/pull/3622>`_
Build system improvements:
* Fix CMake extension suffix computation on Python 3.10+.
`#3663 <https://github.com/pybind/pybind11/pull/3663>`_
* Allow ``CMAKE_ARGS`` to override CMake args in pybind11's own ``setup.py``.
`#3577 <https://github.com/pybind/pybind11/pull/3577>`_
* Remove a few deprecated c-headers.
`#3610 <https://github.com/pybind/pybind11/pull/3610>`_
* More uniform handling of test targets.
`#3590 <https://github.com/pybind/pybind11/pull/3590>`_
* Add clang-tidy readability check to catch potentially swapped function args.
`#3611 <https://github.com/pybind/pybind11/pull/3611>`_
Version 2.9.0 (Dec 28, 2021)
----------------------------
This is the last version to support Python 2.7 and 3.5.
New Features:
* Allow ``py::args`` to be followed by other arguments; the remaining arguments
are implicitly keyword-only, as if a ``py::kw_only{}`` annotation had been
used.
`#3402 <https://github.com/pybind/pybind11/pull/3402>`_
Changes:
* Make str/bytes/memoryview more interoperable with ``std::string_view``.
`#3521 <https://github.com/pybind/pybind11/pull/3521>`_
* Replace ``_`` with ``const_name`` in internals, avoid defining ``pybind::_``
if ``_`` defined as macro (common gettext usage)
`#3423 <https://github.com/pybind/pybind11/pull/3423>`_
Bug fixes:
* Fix a rare warning about extra copy in an Eigen constructor.
`#3486 <https://github.com/pybind/pybind11/pull/3486>`_
* Fix caching of the C++ overrides.
`#3465 <https://github.com/pybind/pybind11/pull/3465>`_
* Add missing ``std::forward`` calls to some ``cpp_function`` overloads.
`#3443 <https://github.com/pybind/pybind11/pull/3443>`_
* Support PyPy 7.3.7 and the PyPy3.8 beta. Test python-3.11 on PRs with the
``python dev`` label.
`#3419 <https://github.com/pybind/pybind11/pull/3419>`_
* Replace usage of deprecated ``Eigen::MappedSparseMatrix`` with
``Eigen::Map<Eigen::SparseMatrix<...>>`` for Eigen 3.3+.
`#3499 <https://github.com/pybind/pybind11/pull/3499>`_
* Tweaks to support Microsoft Visual Studio 2022.
`#3497 <https://github.com/pybind/pybind11/pull/3497>`_
Build system improvements:
* Nicer CMake printout and IDE organisation for pybind11's own tests.
`#3479 <https://github.com/pybind/pybind11/pull/3479>`_
* CMake: report version type as part of the version string to avoid a spurious
space in the package status message.
`#3472 <https://github.com/pybind/pybind11/pull/3472>`_
* Flags starting with ``-g`` in ``$CFLAGS`` and ``$CPPFLAGS`` are no longer
overridden by ``.Pybind11Extension``.
`#3436 <https://github.com/pybind/pybind11/pull/3436>`_
* Ensure ThreadPool is closed in ``setup_helpers``.
`#3548 <https://github.com/pybind/pybind11/pull/3548>`_
* Avoid LTS on ``mips64`` and ``ppc64le`` (reported broken).
`#3557 <https://github.com/pybind/pybind11/pull/3557>`_
v2.8.1 (Oct 27, 2021)
---------------------
Changes and additions:
* The simple namespace creation shortcut added in 2.8.0 was deprecated due to
usage of CPython internal API, and will be removed soon. Use
``py::module_::import("types").attr("SimpleNamespace")``.
`#3374 <https://github.com/pybind/pybind11/pull/3374>`_
* Add C++ Exception type to throw and catch ``AttributeError``. Useful for
defining custom ``__setattr__`` and ``__getattr__`` methods.
`#3387 <https://github.com/pybind/pybind11/pull/3387>`_
Fixes:
* Fixed the potential for dangling references when using properties with
``std::optional`` types.
`#3376 <https://github.com/pybind/pybind11/pull/3376>`_
* Modernize usage of ``PyCodeObject`` on Python 3.9+ (moving toward support for
Python 3.11a1)
`#3368 <https://github.com/pybind/pybind11/pull/3368>`_
* A long-standing bug in ``eigen.h`` was fixed (originally PR #3343). The bug
was unmasked by newly added ``static_assert``'s in the Eigen 3.4.0 release.
`#3352 <https://github.com/pybind/pybind11/pull/3352>`_
* Support multiple raw inclusion of CMake helper files (Conan.io does this for
multi-config generators).
`#3420 <https://github.com/pybind/pybind11/pull/3420>`_
* Fix harmless warning on upcoming CMake 3.22.
`#3368 <https://github.com/pybind/pybind11/pull/3368>`_
* Fix 2.8.0 regression with MSVC 2017 + C++17 mode + Python 3.
`#3407 <https://github.com/pybind/pybind11/pull/3407>`_
* Fix 2.8.0 regression that caused undefined behavior (typically
segfaults) in ``make_key_iterator``/``make_value_iterator`` if dereferencing
the iterator returned a temporary value instead of a reference.
`#3348 <https://github.com/pybind/pybind11/pull/3348>`_
v2.8.0 (Oct 4, 2021)
--------------------
New features:
* Added ``py::raise_from`` to enable chaining exceptions.
`#3215 <https://github.com/pybind/pybind11/pull/3215>`_
* Allow exception translators to be optionally registered local to a module
instead of applying globally across all pybind11 modules. Use
``register_local_exception_translator(ExceptionTranslator&& translator)``
instead of ``register_exception_translator(ExceptionTranslator&&
translator)`` to keep your exception remapping code local to the module.
`#2650 <https://github.com/pybind/pybind11/pull/2650>`_
* Add ``make_simple_namespace`` function for instantiating Python
``SimpleNamespace`` objects. **Deprecated in 2.8.1.**
`#2840 <https://github.com/pybind/pybind11/pull/2840>`_
* ``pybind11::scoped_interpreter`` and ``initialize_interpreter`` have new
arguments to allow ``sys.argv`` initialization.
`#2341 <https://github.com/pybind/pybind11/pull/2341>`_
* Allow Python builtins to be used as callbacks in CPython.
`#1413 <https://github.com/pybind/pybind11/pull/1413>`_
* Added ``view`` to view arrays with a different datatype.
`#987 <https://github.com/pybind/pybind11/pull/987>`_
* Implemented ``reshape`` on arrays.
`#984 <https://github.com/pybind/pybind11/pull/984>`_
* Enable defining custom ``__new__`` methods on classes by fixing bug
preventing overriding methods if they have non-pybind11 siblings.
`#3265 <https://github.com/pybind/pybind11/pull/3265>`_
* Add ``make_value_iterator()``, and fix ``make_key_iterator()`` to return
references instead of copies.
`#3293 <https://github.com/pybind/pybind11/pull/3293>`_
* Improve the classes generated by ``bind_map``: `#3310 <https://github.com/pybind/pybind11/pull/3310>`_
* Change ``.items`` from an iterator to a dictionary view.
* Add ``.keys`` and ``.values`` (both dictionary views).
* Allow ``__contains__`` to take any object.
* ``pybind11::custom_type_setup`` was added, for customizing the
``PyHeapTypeObject`` corresponding to a class, which may be useful for
enabling garbage collection support, among other things.
`#3287 <https://github.com/pybind/pybind11/pull/3287>`_
Changes:
* Set ``__file__`` constant when running ``eval_file`` in an embedded interpreter.
`#3233 <https://github.com/pybind/pybind11/pull/3233>`_
* Python objects and (C++17) ``std::optional`` now accepted in ``py::slice``
constructor.
`#1101 <https://github.com/pybind/pybind11/pull/1101>`_
* The pybind11 proxy types ``str``, ``bytes``, ``bytearray``, ``tuple``,
``list`` now consistently support passing ``ssize_t`` values for sizes and
indexes. Previously, only ``size_t`` was accepted in several interfaces.
`#3219 <https://github.com/pybind/pybind11/pull/3219>`_
* Avoid evaluating ``PYBIND11_TLS_REPLACE_VALUE`` arguments more than once.
`#3290 <https://github.com/pybind/pybind11/pull/3290>`_
Fixes:
* Bug fix: enum value's ``__int__`` returning non-int when underlying type is
bool or of char type.
`#1334 <https://github.com/pybind/pybind11/pull/1334>`_
* Fixes bug in setting error state in Capsule's pointer methods.
`#3261 <https://github.com/pybind/pybind11/pull/3261>`_
* A long-standing memory leak in ``py::cpp_function::initialize`` was fixed.
`#3229 <https://github.com/pybind/pybind11/pull/3229>`_
* Fixes thread safety for some ``pybind11::type_caster`` which require lifetime
extension, such as for ``std::string_view``.
`#3237 <https://github.com/pybind/pybind11/pull/3237>`_
* Restore compatibility with gcc 4.8.4 as distributed by ubuntu-trusty, linuxmint-17.
`#3270 <https://github.com/pybind/pybind11/pull/3270>`_
Build system improvements:
* Fix regression in CMake Python package config: improper use of absolute path.
`#3144 <https://github.com/pybind/pybind11/pull/3144>`_
* Cached Python version information could become stale when CMake was re-run
with a different Python version. The build system now detects this and
updates this information.
`#3299 <https://github.com/pybind/pybind11/pull/3299>`_
* Specified UTF8-encoding in setup.py calls of open().
`#3137 <https://github.com/pybind/pybind11/pull/3137>`_
* Fix a harmless warning from CMake 3.21 with the classic Python discovery.
`#3220 <https://github.com/pybind/pybind11/pull/3220>`_
* Eigen repo and version can now be specified as cmake options.
`#3324 <https://github.com/pybind/pybind11/pull/3324>`_
Backend and tidying up:
* Reduced thread-local storage required for keeping alive temporary data for
type conversion to one key per ABI version, rather than one key per extension
module. This makes the total thread-local storage required by pybind11 2
keys per ABI version.
`#3275 <https://github.com/pybind/pybind11/pull/3275>`_
* Optimize NumPy array construction with additional moves.
`#3183 <https://github.com/pybind/pybind11/pull/3183>`_
* Conversion to ``std::string`` and ``std::string_view`` now avoids making an
extra copy of the data on Python >= 3.3.
`#3257 <https://github.com/pybind/pybind11/pull/3257>`_
* Remove const modifier from certain C++ methods on Python collections
(``list``, ``set``, ``dict``) such as (``clear()``, ``append()``,
``insert()``, etc...) and annotated them with ``py-non-const``.
* Enable readability ``clang-tidy-const-return`` and remove useless consts.
`#3254 <https://github.com/pybind/pybind11/pull/3254>`_
`#3194 <https://github.com/pybind/pybind11/pull/3194>`_
* The clang-tidy ``google-explicit-constructor`` option was enabled.
`#3250 <https://github.com/pybind/pybind11/pull/3250>`_
* Mark a pytype move constructor as noexcept (perf).
`#3236 <https://github.com/pybind/pybind11/pull/3236>`_
* Enable clang-tidy check to guard against inheritance slicing.
`#3210 <https://github.com/pybind/pybind11/pull/3210>`_
* Legacy warning suppression pragmas were removed from eigen.h. On Unix
platforms, please use -isystem for Eigen include directories, to suppress
compiler warnings originating from Eigen headers. Note that CMake does this
by default. No adjustments are needed for Windows.
`#3198 <https://github.com/pybind/pybind11/pull/3198>`_
* Format pybind11 with isort consistent ordering of imports
`#3195 <https://github.com/pybind/pybind11/pull/3195>`_
* The warnings-suppression "pragma clamp" at the top/bottom of pybind11 was
removed, clearing the path to refactoring and IWYU cleanup.
`#3186 <https://github.com/pybind/pybind11/pull/3186>`_
* Enable most bugprone checks in clang-tidy and fix the found potential bugs
and poor coding styles.
`#3166 <https://github.com/pybind/pybind11/pull/3166>`_
* Add ``clang-tidy-readability`` rules to make boolean casts explicit improving
code readability. Also enabled other misc and readability clang-tidy checks.
`#3148 <https://github.com/pybind/pybind11/pull/3148>`_
* Move object in ``.pop()`` for list.
`#3116 <https://github.com/pybind/pybind11/pull/3116>`_
v2.7.1 (Aug 3, 2021)
---------------------
Minor missing functionality added:
* Allow Python builtins to be used as callbacks in CPython.
`#1413 <https://github.com/pybind/pybind11/pull/1413>`_
Bug fixes:
* Fix regression in CMake Python package config: improper use of absolute path.
`#3144 <https://github.com/pybind/pybind11/pull/3144>`_
* Fix Mingw64 and add to the CI testing matrix.
`#3132 <https://github.com/pybind/pybind11/pull/3132>`_
* Specified UTF8-encoding in setup.py calls of open().
`#3137 <https://github.com/pybind/pybind11/pull/3137>`_
* Add clang-tidy-readability rules to make boolean casts explicit improving
code readability. Also enabled other misc and readability clang-tidy checks.
`#3148 <https://github.com/pybind/pybind11/pull/3148>`_
* Move object in ``.pop()`` for list.
`#3116 <https://github.com/pybind/pybind11/pull/3116>`_
Backend and tidying up:
* Removed and fixed warning suppressions.
`#3127 <https://github.com/pybind/pybind11/pull/3127>`_
`#3129 <https://github.com/pybind/pybind11/pull/3129>`_
`#3135 <https://github.com/pybind/pybind11/pull/3135>`_
`#3141 <https://github.com/pybind/pybind11/pull/3141>`_
`#3142 <https://github.com/pybind/pybind11/pull/3142>`_
`#3150 <https://github.com/pybind/pybind11/pull/3150>`_
`#3152 <https://github.com/pybind/pybind11/pull/3152>`_
`#3160 <https://github.com/pybind/pybind11/pull/3160>`_
`#3161 <https://github.com/pybind/pybind11/pull/3161>`_
v2.7.0 (Jul 16, 2021)
---------------------
New features:
* Enable ``py::implicitly_convertible<py::none, ...>`` for
``py::class_``-wrapped types.
`#3059 <https://github.com/pybind/pybind11/pull/3059>`_
* Allow function pointer extraction from overloaded functions.
`#2944 <https://github.com/pybind/pybind11/pull/2944>`_
* NumPy: added ``.char_()`` to type which gives the NumPy public ``char``
result, which also distinguishes types by bit length (unlike ``.kind()``).
`#2864 <https://github.com/pybind/pybind11/pull/2864>`_
* Add ``pybind11::bytearray`` to manipulate ``bytearray`` similar to ``bytes``.
`#2799 <https://github.com/pybind/pybind11/pull/2799>`_
* ``pybind11/stl/filesystem.h`` registers a type caster that, on C++17/Python
3.6+, converts ``std::filesystem::path`` to ``pathlib.Path`` and any
``os.PathLike`` to ``std::filesystem::path``.
`#2730 <https://github.com/pybind/pybind11/pull/2730>`_
* A ``PYBIND11_VERSION_HEX`` define was added, similar to ``PY_VERSION_HEX``.
`#3120 <https://github.com/pybind/pybind11/pull/3120>`_
Changes:
* ``py::str`` changed to exclusively hold ``PyUnicodeObject``. Previously
``py::str`` could also hold ``bytes``, which is probably surprising, was
never documented, and can mask bugs (e.g. accidental use of ``py::str``
instead of ``py::bytes``).
`#2409 <https://github.com/pybind/pybind11/pull/2409>`_
* Add a safety guard to ensure that the Python GIL is held when C++ calls back
into Python via ``object_api<>::operator()`` (e.g. ``py::function``
``__call__``). (This feature is available for Python 3.6+ only.)
`#2919 <https://github.com/pybind/pybind11/pull/2919>`_
* Catch a missing ``self`` argument in calls to ``__init__()``.
`#2914 <https://github.com/pybind/pybind11/pull/2914>`_
* Use ``std::string_view`` if available to avoid a copy when passing an object
to a ``std::ostream``.
`#3042 <https://github.com/pybind/pybind11/pull/3042>`_
* An important warning about thread safety was added to the ``iostream.h``
documentation; attempts to make ``py::scoped_ostream_redirect`` thread safe
have been removed, as it was only partially effective.
`#2995 <https://github.com/pybind/pybind11/pull/2995>`_
Fixes:
* Performance: avoid unnecessary strlen calls.
`#3058 <https://github.com/pybind/pybind11/pull/3058>`_
* Fix auto-generated documentation string when using ``const T`` in
``pyarray_t``.
`#3020 <https://github.com/pybind/pybind11/pull/3020>`_
* Unify error messages thrown by ``simple_collector``/``unpacking_collector``.
`#3013 <https://github.com/pybind/pybind11/pull/3013>`_
* ``pybind11::builtin_exception`` is now explicitly exported, which means the
types included/defined in different modules are identical, and exceptions
raised in different modules can be caught correctly. The documentation was
updated to explain that custom exceptions that are used across module
boundaries need to be explicitly exported as well.
`#2999 <https://github.com/pybind/pybind11/pull/2999>`_
* Fixed exception when printing UTF-8 to a ``scoped_ostream_redirect``.
`#2982 <https://github.com/pybind/pybind11/pull/2982>`_
* Pickle support enhancement: ``setstate`` implementation will attempt to
``setattr`` ``__dict__`` only if the unpickled ``dict`` object is not empty,
to not force use of ``py::dynamic_attr()`` unnecessarily.
`#2972 <https://github.com/pybind/pybind11/pull/2972>`_
* Allow negative timedelta values to roundtrip.
`#2870 <https://github.com/pybind/pybind11/pull/2870>`_
* Fix unchecked errors could potentially swallow signals/other exceptions.
`#2863 <https://github.com/pybind/pybind11/pull/2863>`_
* Add null pointer check with ``std::localtime``.
`#2846 <https://github.com/pybind/pybind11/pull/2846>`_
* Fix the ``weakref`` constructor from ``py::object`` to create a new
``weakref`` on conversion.
`#2832 <https://github.com/pybind/pybind11/pull/2832>`_
* Avoid relying on exceptions in C++17 when getting a ``shared_ptr`` holder
from a ``shared_from_this`` class.
`#2819 <https://github.com/pybind/pybind11/pull/2819>`_
* Allow the codec's exception to be raised instead of :code:`RuntimeError` when
casting from :code:`py::str` to :code:`std::string`.
`#2903 <https://github.com/pybind/pybind11/pull/2903>`_
Build system improvements:
* In ``setup_helpers.py``, test for platforms that have some multiprocessing
features but lack semaphores, which ``ParallelCompile`` requires.
`#3043 <https://github.com/pybind/pybind11/pull/3043>`_
* Fix ``pybind11_INCLUDE_DIR`` in case ``CMAKE_INSTALL_INCLUDEDIR`` is
absolute.
`#3005 <https://github.com/pybind/pybind11/pull/3005>`_
* Fix bug not respecting ``WITH_SOABI`` or ``WITHOUT_SOABI`` to CMake.
`#2938 <https://github.com/pybind/pybind11/pull/2938>`_
* Fix the default ``Pybind11Extension`` compilation flags with a Mingw64 python.
`#2921 <https://github.com/pybind/pybind11/pull/2921>`_
* Clang on Windows: do not pass ``/MP`` (ignored flag).
`#2824 <https://github.com/pybind/pybind11/pull/2824>`_
* ``pybind11.setup_helpers.intree_extensions`` can be used to generate
``Pybind11Extension`` instances from cpp files placed in the Python package
source tree.
`#2831 <https://github.com/pybind/pybind11/pull/2831>`_
Backend and tidying up:
* Enable clang-tidy performance, readability, and modernization checks
throughout the codebase to enforce best coding practices.
`#3046 <https://github.com/pybind/pybind11/pull/3046>`_,
`#3049 <https://github.com/pybind/pybind11/pull/3049>`_,
`#3051 <https://github.com/pybind/pybind11/pull/3051>`_,
`#3052 <https://github.com/pybind/pybind11/pull/3052>`_,
`#3080 <https://github.com/pybind/pybind11/pull/3080>`_, and
`#3094 <https://github.com/pybind/pybind11/pull/3094>`_
* Checks for common misspellings were added to the pre-commit hooks.
`#3076 <https://github.com/pybind/pybind11/pull/3076>`_
* Changed ``Werror`` to stricter ``Werror-all`` for Intel compiler and fixed
minor issues.
`#2948 <https://github.com/pybind/pybind11/pull/2948>`_
* Fixed compilation with GCC < 5 when the user defines ``_GLIBCXX_USE_CXX11_ABI``.
`#2956 <https://github.com/pybind/pybind11/pull/2956>`_
* Added nox support for easier local testing and linting of contributions.
`#3101 <https://github.com/pybind/pybind11/pull/3101>`_ and
`#3121 <https://github.com/pybind/pybind11/pull/3121>`_
* Avoid RTD style issue with docutils 0.17+.
`#3119 <https://github.com/pybind/pybind11/pull/3119>`_
* Support pipx run, such as ``pipx run pybind11 --include`` for a quick compile.
`#3117 <https://github.com/pybind/pybind11/pull/3117>`_
v2.6.2 (Jan 26, 2021)
---------------------
Minor missing functionality added:
* enum: add missing Enum.value property.
`#2739 <https://github.com/pybind/pybind11/pull/2739>`_
* Allow thread termination to be avoided during shutdown for CPython 3.7+ via
``.disarm`` for ``gil_scoped_acquire``/``gil_scoped_release``.
`#2657 <https://github.com/pybind/pybind11/pull/2657>`_
Fixed or improved behavior in a few special cases:
* Fix bug where the constructor of ``object`` subclasses would not throw on
being passed a Python object of the wrong type.
`#2701 <https://github.com/pybind/pybind11/pull/2701>`_
* The ``type_caster`` for integers does not convert Python objects with
``__int__`` anymore with ``noconvert`` or during the first round of trying
overloads.
`#2698 <https://github.com/pybind/pybind11/pull/2698>`_
* When casting to a C++ integer, ``__index__`` is always called and not
considered as conversion, consistent with Python 3.8+.
`#2801 <https://github.com/pybind/pybind11/pull/2801>`_
Build improvements:
* Setup helpers: ``extra_compile_args`` and ``extra_link_args`` automatically set by
Pybind11Extension are now prepended, which allows them to be overridden
by user-set ``extra_compile_args`` and ``extra_link_args``.
`#2808 <https://github.com/pybind/pybind11/pull/2808>`_
* Setup helpers: Don't trigger unused parameter warning.
`#2735 <https://github.com/pybind/pybind11/pull/2735>`_
* CMake: Support running with ``--warn-uninitialized`` active.
`#2806 <https://github.com/pybind/pybind11/pull/2806>`_
* CMake: Avoid error if included from two submodule directories.
`#2804 <https://github.com/pybind/pybind11/pull/2804>`_
* CMake: Fix ``STATIC`` / ``SHARED`` being ignored in FindPython mode.
`#2796 <https://github.com/pybind/pybind11/pull/2796>`_
* CMake: Respect the setting for ``CMAKE_CXX_VISIBILITY_PRESET`` if defined.
`#2793 <https://github.com/pybind/pybind11/pull/2793>`_
* CMake: Fix issue with FindPython2/FindPython3 not working with ``pybind11::embed``.
`#2662 <https://github.com/pybind/pybind11/pull/2662>`_
* CMake: mixing local and installed pybind11's would prioritize the installed
one over the local one (regression in 2.6.0).
`#2716 <https://github.com/pybind/pybind11/pull/2716>`_
Bug fixes:
* Fixed segfault in multithreaded environments when using
``scoped_ostream_redirect``.
`#2675 <https://github.com/pybind/pybind11/pull/2675>`_
* Leave docstring unset when all docstring-related options are disabled, rather
than set an empty string.
`#2745 <https://github.com/pybind/pybind11/pull/2745>`_
* The module key in builtins that pybind11 uses to store its internals changed
from std::string to a python str type (more natural on Python 2, no change on
Python 3).
`#2814 <https://github.com/pybind/pybind11/pull/2814>`_
* Fixed assertion error related to unhandled (later overwritten) exception in
CPython 3.8 and 3.9 debug builds.
`#2685 <https://github.com/pybind/pybind11/pull/2685>`_
* Fix ``py::gil_scoped_acquire`` assert with CPython 3.9 debug build.
`#2683 <https://github.com/pybind/pybind11/pull/2683>`_
* Fix issue with a test failing on pytest 6.2.
`#2741 <https://github.com/pybind/pybind11/pull/2741>`_
Warning fixes:
* Fix warning modifying constructor parameter 'flag' that shadows a field of
'set_flag' ``[-Wshadow-field-in-constructor-modified]``.
`#2780 <https://github.com/pybind/pybind11/pull/2780>`_
* Suppressed some deprecation warnings about old-style
``__init__``/``__setstate__`` in the tests.
`#2759 <https://github.com/pybind/pybind11/pull/2759>`_
Valgrind work:
* Fix invalid access when calling a pybind11 ``__init__`` on a non-pybind11
class instance.
`#2755 <https://github.com/pybind/pybind11/pull/2755>`_
* Fixed various minor memory leaks in pybind11's test suite.
`#2758 <https://github.com/pybind/pybind11/pull/2758>`_
* Resolved memory leak in cpp_function initialization when exceptions occurred.
`#2756 <https://github.com/pybind/pybind11/pull/2756>`_
* Added a Valgrind build, checking for leaks and memory-related UB, to CI.
`#2746 <https://github.com/pybind/pybind11/pull/2746>`_
Compiler support:
* Intel compiler was not activating C++14 support due to a broken define.
`#2679 <https://github.com/pybind/pybind11/pull/2679>`_
* Support ICC and NVIDIA HPC SDK in C++17 mode.
`#2729 <https://github.com/pybind/pybind11/pull/2729>`_
* Support Intel OneAPI compiler (ICC 20.2) and add to CI.
`#2573 <https://github.com/pybind/pybind11/pull/2573>`_
v2.6.1 (Nov 11, 2020)
---------------------
* ``py::exec``, ``py::eval``, and ``py::eval_file`` now add the builtins module
as ``"__builtins__"`` to their ``globals`` argument, better matching ``exec``
and ``eval`` in pure Python.
`#2616 <https://github.com/pybind/pybind11/pull/2616>`_
* ``setup_helpers`` will no longer set a minimum macOS version higher than the
current version.
`#2622 <https://github.com/pybind/pybind11/pull/2622>`_
* Allow deleting static properties.
`#2629 <https://github.com/pybind/pybind11/pull/2629>`_
* Seal a leak in ``def_buffer``, cleaning up the ``capture`` object after the
``class_`` object goes out of scope.
`#2634 <https://github.com/pybind/pybind11/pull/2634>`_
* ``pybind11_INCLUDE_DIRS`` was incorrect, potentially causing a regression if
it was expected to include ``PYTHON_INCLUDE_DIRS`` (please use targets
instead).
`#2636 <https://github.com/pybind/pybind11/pull/2636>`_
* Added parameter names to the ``py::enum_`` constructor and methods, avoiding
``arg0`` in the generated docstrings.
`#2637 <https://github.com/pybind/pybind11/pull/2637>`_
* Added ``needs_recompile`` optional function to the ``ParallelCompiler``
helper, to allow a recompile to be skipped based on a user-defined function.
`#2643 <https://github.com/pybind/pybind11/pull/2643>`_
v2.6.0 (Oct 21, 2020)
---------------------
See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
* Provide an additional spelling of ``py::module`` - ``py::module_`` (with a
trailing underscore), for C++20 compatibility. Only relevant when used
unqualified.
`#2489 <https://github.com/pybind/pybind11/pull/2489>`_
* ``pybind11_add_module()`` now accepts an optional ``OPT_SIZE`` flag that
switches the binding target to size-based optimization regardless global
CMake build type (except in debug mode, where optimizations remain disabled).
This reduces binary size quite substantially (~25%).
`#2463 <https://github.com/pybind/pybind11/pull/2463>`_
New features:
* Keyword-only arguments supported in Python 2 or 3 with ``py::kw_only()``.
`#2100 <https://github.com/pybind/pybind11/pull/2100>`_
@ -28,11 +704,17 @@ See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
* Positional-only arguments supported in Python 2 or 3 with ``py::pos_only()``.
`#2459 <https://github.com/pybind/pybind11/pull/2459>`_
* ``py::is_final()`` class modifier to block subclassing (CPython only).
`#2151 <https://github.com/pybind/pybind11/pull/2151>`_
* Added ``py::prepend()``, allowing a function to be placed at the beginning of
the overload chain.
`#1131 <https://github.com/pybind/pybind11/pull/1131>`_
* Access to the type object now provided with ``py::type::of<T>()`` and
``py::type::of(h)``.
`#2364 <https://github.com/pybind/pybind11/pull/2364>`_
* Perfect forwarding support for methods.
`#2048 <https://github.com/pybind/pybind11/pull/2048>`_
@ -42,11 +724,48 @@ See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
* ``py::hash`` is now public.
`#2217 <https://github.com/pybind/pybind11/pull/2217>`_
* ``py::is_final()`` class modifier to block subclassing (CPython only).
`#2151 <https://github.com/pybind/pybind11/pull/2151>`_
* ``py::class_<union_type>`` is now supported. Note that writing to one data
member of the union and reading another (type punning) is UB in C++. Thus
pybind11-bound unions should never be used for such conversions.
`#2320 <https://github.com/pybind/pybind11/pull/2320>`_.
* ``py::memoryview`` update and documentation.
`#2223 <https://github.com/pybind/pybind11/pull/2223>`_
* Classes now check local scope when registering members, allowing a subclass
to have a member with the same name as a parent (such as an enum).
`#2335 <https://github.com/pybind/pybind11/pull/2335>`_
Code correctness features:
* Error now thrown when ``__init__`` is forgotten on subclasses.
`#2152 <https://github.com/pybind/pybind11/pull/2152>`_
* Throw an error when conversion to a pybind11 type is attempted but the Python
object isn't a valid instance of that type, such as ``py::bytes(o)`` when ``py::object o``
isn't a bytes instance.
`#2349 <https://github.com/pybind/pybind11/pull/2349>`_
* Throw if conversion to ``str`` fails.
`#2477 <https://github.com/pybind/pybind11/pull/2477>`_
API changes:
* ``py::module`` was renamed ``py::module_`` to avoid issues with C++20 when
used unqualified, but an alias ``py::module`` is provided for backward
compatibility.
`#2489 <https://github.com/pybind/pybind11/pull/2489>`_
* Public constructors for ``py::module_`` have been deprecated; please use
``pybind11::module_::create_extension_module`` if you were using the public
constructor (fairly rare after ``PYBIND11_MODULE`` was introduced).
`#2552 <https://github.com/pybind/pybind11/pull/2552>`_
* ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function replaced by
correctly-named ``PYBIND11_OVERRIDE*`` and ``get_override``, fixing
inconsistencies in the presence of a closing ``;`` in these macros.
``get_type_overload`` is deprecated.
`#2325 <https://github.com/pybind/pybind11/pull/2325>`_
Packaging / building improvements:
* The Python package was reworked to be more powerful and useful.
`#2433 <https://github.com/pybind/pybind11/pull/2433>`_
@ -54,7 +773,7 @@ See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
* :ref:`build-setuptools` is easier thanks to a new
``pybind11.setup_helpers`` module, which provides utilities to use
setuptools with pybind11. It can be used via PEP 518, ``setup_requires``,
or by directly copying ``setup_helpers.py`` into your project.
or by directly importing or copying ``setup_helpers.py`` into your project.
* CMake configuration files are now included in the Python package. Use
``pybind11.get_cmake_dir()`` or ``python -m pybind11 --cmakedir`` to get
@ -62,17 +781,21 @@ See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
site-packages location in your ``CMAKE_MODULE_PATH``. Or you can use the
new ``pybind11[global]`` extra when you install ``pybind11``, which
installs the CMake files and headers into your base environment in the
standard location
standard location.
* ``pybind11-config`` is another way to write ``python -m pybind11`` if you
have your PATH set up.
* Added external typing support to the helper module, code from
``import pybind11`` can now be type checked.
`#2588 <https://github.com/pybind/pybind11/pull/2588>`_
* Minimum CMake required increased to 3.4.
`#2338 <https://github.com/pybind/pybind11/pull/2338>`_ and
`#2370 <https://github.com/pybind/pybind11/pull/2370>`_
* Full integration with CMakes C++ standard system replaces
``PYBIND11_CPP_STANDARD``.
* Full integration with CMake's C++ standard system and compile features
replaces ``PYBIND11_CPP_STANDARD``.
* Generated config file is now portable to different Python/compiler/CMake
versions.
@ -85,27 +808,36 @@ See :ref:`upgrade-guide-2.6` for help upgrading to the new version.
``CMAKE_INTERPROCEDURAL_OPTIMIZATION``, ``set(CMAKE_CXX_VISIBILITY_PRESET
hidden)``.
* Optional :ref:`find-python-mode` and :ref:`nopython-mode` with CMake.
`#2370 <https://github.com/pybind/pybind11/pull/2370>`_
* ``CUDA`` as a language is now supported.
* Helper functions ``pybind11_strip``, ``pybind11_extension``,
``pybind11_find_import`` added, see :doc:`cmake/index`.
* Optional :ref:`find-python-mode` and :ref:`nopython-mode` with CMake.
`#2370 <https://github.com/pybind/pybind11/pull/2370>`_
* Uninstall target added.
`#2265 <https://github.com/pybind/pybind11/pull/2265>`_ and
`#2346 <https://github.com/pybind/pybind11/pull/2346>`_
* ``PYBIND11_OVERLOAD*`` macros and ``get_overload`` function replaced by
correctly-named ``PYBIND11_OVERRIDE*`` and ``get_override``, fixing
inconsistencies in the presene of a closing ``;`` in these macros.
``get_type_overload`` is deprecated.
`#2325 <https://github.com/pybind/pybind11/pull/2325>`_
* ``pybind11_add_module()`` now accepts an optional ``OPT_SIZE`` flag that
switches the binding target to size-based optimization if the global build
type can not always be fixed to ``MinSizeRel`` (except in debug mode, where
optimizations remain disabled). ``MinSizeRel`` or this flag reduces binary
size quite substantially (~25% on some platforms).
`#2463 <https://github.com/pybind/pybind11/pull/2463>`_
Smaller or developer focused features:
Smaller or developer focused features and fixes:
* Moved ``mkdoc.py`` to a new repo, `pybind11-mkdoc`_.
* Moved ``mkdoc.py`` to a new repo, `pybind11-mkdoc`_. There are no longer
submodules in the main repo.
.. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc
* ``py::memoryview`` segfault fix and update, with new
``py::memoryview::from_memory`` in Python 3, and documentation.
`#2223 <https://github.com/pybind/pybind11/pull/2223>`_
* Error now thrown when ``__init__`` is forgotten on subclasses.
`#2152 <https://github.com/pybind/pybind11/pull/2152>`_
* Fix for ``buffer_info`` on Python 2.
`#2503 <https://github.com/pybind/pybind11/pull/2503>`_
* If ``__eq__`` defined but not ``__hash__``, ``__hash__`` is now set to
``None``.
@ -114,12 +846,6 @@ Smaller or developer focused features:
* ``py::ellipsis`` now also works on Python 2.
`#2360 <https://github.com/pybind/pybind11/pull/2360>`_
* Throw if conversion to ``str`` fails.
`#2477 <https://github.com/pybind/pybind11/pull/2477>`_
* Added missing signature for ``py::array``.
`#2363 <https://github.com/pybind/pybind11/pull/2363>`_
* Pointer to ``std::tuple`` & ``std::pair`` supported in cast.
`#2334 <https://github.com/pybind/pybind11/pull/2334>`_
@ -127,7 +853,26 @@ Smaller or developer focused features:
argument type.
`#2293 <https://github.com/pybind/pybind11/pull/2293>`_
* Bugfixes related to more extensive testing
* Added missing signature for ``py::array``.
`#2363 <https://github.com/pybind/pybind11/pull/2363>`_
* ``unchecked_mutable_reference`` has access to operator ``()`` and ``[]`` when
const.
`#2514 <https://github.com/pybind/pybind11/pull/2514>`_
* ``py::vectorize`` is now supported on functions that return void.
`#1969 <https://github.com/pybind/pybind11/pull/1969>`_
* ``py::capsule`` supports ``get_pointer`` and ``set_pointer``.
`#1131 <https://github.com/pybind/pybind11/pull/1131>`_
* Fix crash when different instances share the same pointer of the same type.
`#2252 <https://github.com/pybind/pybind11/pull/2252>`_
* Fix for ``py::len`` not clearing Python's error state when it fails and throws.
`#2575 <https://github.com/pybind/pybind11/pull/2575>`_
* Bugfixes related to more extensive testing, new GitHub Actions CI.
`#2321 <https://github.com/pybind/pybind11/pull/2321>`_
* Bug in timezone issue in Eastern hemisphere midnight fixed.
@ -141,16 +886,27 @@ Smaller or developer focused features:
requested ordering.
`#2484 <https://github.com/pybind/pybind11/pull/2484>`_
* PyPy fixes, including support for PyPy3 and PyPy 7.
* Avoid a segfault on some compilers when types are removed in Python.
`#2564 <https://github.com/pybind/pybind11/pull/2564>`_
* ``py::arg::none()`` is now also respected when passing keyword arguments.
`#2611 <https://github.com/pybind/pybind11/pull/2611>`_
* PyPy fixes, PyPy 7.3.x now supported, including PyPy3. (Known issue with
PyPy2 and Windows `#2596 <https://github.com/pybind/pybind11/issues/2596>`_).
`#2146 <https://github.com/pybind/pybind11/pull/2146>`_
* CPython 3.9 fixes.
* CPython 3.9.0 workaround for undefined behavior (macOS segfault).
`#2576 <https://github.com/pybind/pybind11/pull/2576>`_
* CPython 3.9 warning fixes.
`#2253 <https://github.com/pybind/pybind11/pull/2253>`_
* More C++20 support.
* Improved C++20 support, now tested in CI.
`#2489 <https://github.com/pybind/pybind11/pull/2489>`_
`#2599 <https://github.com/pybind/pybind11/pull/2599>`_
* Debug Python interpreter support.
* Improved but still incomplete debug Python interpreter support.
`#2025 <https://github.com/pybind/pybind11/pull/2025>`_
* NVCC (CUDA 11) now supported and tested in CI.
@ -159,11 +915,20 @@ Smaller or developer focused features:
* NVIDIA PGI compilers now supported and tested in CI.
`#2475 <https://github.com/pybind/pybind11/pull/2475>`_
* Extensive style checking in CI, with `pre-commit`_ support.
* At least Intel 18 now explicitly required when compiling with Intel.
`#2577 <https://github.com/pybind/pybind11/pull/2577>`_
* Extensive style checking in CI, with `pre-commit`_ support. Code
modernization, checked by clang-tidy.
* Expanded docs, including new main page, new installing section, and CMake
helpers page, along with over a dozen new sections on existing pages.
* In GitHub, new docs for contributing and new issue templates.
.. _pre-commit: https://pre-commit.com
.. _pybind11-mkdoc: https://github.com/pybind/pybind11-mkdoc
v2.5.0 (Mar 31, 2020)
-----------------------------------------------------
@ -261,7 +1026,7 @@ v2.4.0 (Sep 19, 2019)
`#1888 <https://github.com/pybind/pybind11/pull/1888>`_.
* ``py::details::overload_cast_impl`` is available in C++11 mode, can be used
like ``overload_cast`` with an additional set of parantheses.
like ``overload_cast`` with an additional set of parentheses.
`#1581 <https://github.com/pybind/pybind11/pull/1581>`_.
* Fixed ``get_include()`` on Conda.
@ -520,7 +1285,7 @@ v2.2.2 (February 7, 2018)
v2.2.1 (September 14, 2017)
-----------------------------------------------------
* Added ``py::module::reload()`` member function for reloading a module.
* Added ``py::module_::reload()`` member function for reloading a module.
`#1040 <https://github.com/pybind/pybind11/pull/1040>`_.
* Fixed a reference leak in the number converter.
@ -583,6 +1348,7 @@ v2.2.0 (August 31, 2017)
from cpp_module import CppBase1, CppBase2
class PyDerived(CppBase1, CppBase2):
def __init__(self):
CppBase1.__init__(self) # C++ bases must be initialized explicitly
@ -795,7 +1561,7 @@ v2.2.0 (August 31, 2017)
* Intel C++ compiler compatibility fixes.
`#937 <https://github.com/pybind/pybind11/pull/937>`_.
* Fixed implicit conversion of `py::enum_` to integer types on Python 2.7.
* Fixed implicit conversion of ``py::enum_`` to integer types on Python 2.7.
`#821 <https://github.com/pybind/pybind11/pull/821>`_.
* Added ``py::hash`` to fetch the hash value of Python objects, and

View File

@ -44,12 +44,12 @@ interactive Python session demonstrating this example is shown below:
% python
>>> import example
>>> p = example.Pet('Molly')
>>> p = example.Pet("Molly")
>>> print(p)
<example.Pet object at 0x10cd98060>
>>> p.getName()
u'Molly'
>>> p.setName('Charly')
>>> p.setName("Charly")
>>> p.getName()
u'Charly'
@ -122,10 +122,10 @@ This makes it possible to write
.. code-block:: pycon
>>> p = example.Pet('Molly')
>>> p = example.Pet("Molly")
>>> p.name
u'Molly'
>>> p.name = 'Charly'
>>> p.name = "Charly"
>>> p.name
u'Charly'
@ -174,10 +174,10 @@ Native Python classes can pick up new attributes dynamically:
.. code-block:: pycon
>>> class Pet:
... name = 'Molly'
... name = "Molly"
...
>>> p = Pet()
>>> p.name = 'Charly' # overwrite existing
>>> p.name = "Charly" # overwrite existing
>>> p.age = 2 # dynamically add a new attribute
By default, classes exported from C++ do not support this and the only writable
@ -195,7 +195,7 @@ Trying to set any other attribute results in an error:
.. code-block:: pycon
>>> p = example.Pet()
>>> p.name = 'Charly' # OK, attribute defined in C++
>>> p.name = "Charly" # OK, attribute defined in C++
>>> p.age = 2 # fail
AttributeError: 'Pet' object has no attribute 'age'
@ -213,7 +213,7 @@ Now everything works as expected:
.. code-block:: pycon
>>> p = example.Pet()
>>> p.name = 'Charly' # OK, overwrite value in C++
>>> p.name = "Charly" # OK, overwrite value in C++
>>> p.age = 2 # OK, dynamically add a new attribute
>>> p.__dict__ # just like a native Python class
{'age': 2}
@ -280,7 +280,7 @@ expose fields and methods of both types:
.. code-block:: pycon
>>> p = example.Dog('Molly')
>>> p = example.Dog("Molly")
>>> p.name
u'Molly'
>>> p.bark()
@ -446,8 +446,7 @@ you can use ``py::detail::overload_cast_impl`` with an additional set of parenth
Enumerations and internal types
===============================
Let's now suppose that the example class contains an internal enumeration type,
e.g.:
Let's now suppose that the example class contains internal types like enumerations, e.g.:
.. code-block:: cpp
@ -457,10 +456,15 @@ e.g.:
Cat
};
struct Attributes {
float age = 0;
};
Pet(const std::string &name, Kind type) : name(name), type(type) { }
std::string name;
Kind type;
Attributes attr;
};
The binding code for this example looks as follows:
@ -471,22 +475,28 @@ The binding code for this example looks as follows:
pet.def(py::init<const std::string &, Pet::Kind>())
.def_readwrite("name", &Pet::name)
.def_readwrite("type", &Pet::type);
.def_readwrite("type", &Pet::type)
.def_readwrite("attr", &Pet::attr);
py::enum_<Pet::Kind>(pet, "Kind")
.value("Dog", Pet::Kind::Dog)
.value("Cat", Pet::Kind::Cat)
.export_values();
To ensure that the ``Kind`` type is created within the scope of ``Pet``, the
``pet`` :class:`class_` instance must be supplied to the :class:`enum_`.
py::class_<Pet::Attributes> attributes(pet, "Attributes")
.def(py::init<>())
.def_readwrite("age", &Pet::Attributes::age);
To ensure that the nested types ``Kind`` and ``Attributes`` are created within the scope of ``Pet``, the
``pet`` :class:`class_` instance must be supplied to the :class:`enum_` and :class:`class_`
constructors. The :func:`enum_::export_values` function exports the enum entries
into the parent scope, which should be skipped for newer C++11-style strongly
typed enums.
.. code-block:: pycon
>>> p = Pet('Lucy', Pet.Cat)
>>> p = Pet("Lucy", Pet.Cat)
>>> p.type
Kind.Cat
>>> int(p.type)
@ -508,7 +518,7 @@ The ``name`` property returns the name of the enum value as a unicode string.
.. code-block:: pycon
>>> p = Pet( "Lucy", Pet.Cat )
>>> p = Pet("Lucy", Pet.Cat)
>>> pet_type = p.type
>>> pet_type
Pet.Cat

View File

@ -0,0 +1,8 @@
CMake helpers
-------------
Pybind11 can be used with ``add_subdirectory(extern/pybind11)``, or from an
install with ``find_package(pybind11 CONFIG)``. The interface provided in
either case is functionally identical.
.. cmake-module:: ../../tools/pybind11Config.cmake.in

View File

@ -31,20 +31,18 @@ An example of a ``setup.py`` using pybind11's helpers:
.. code-block:: python
from glob import glob
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension
ext_modules = [
Pybind11Extension(
"python_example",
["src/main.cpp"],
sorted(glob("src/*.cpp")), # Sort source files for reproducibility
),
]
setup(
...,
ext_modules=ext_modules
)
setup(..., ext_modules=ext_modules)
If you want to do an automatic search for the highest supported C++ standard,
that is supported via a ``build_ext`` command override; it will only affect
@ -52,21 +50,81 @@ that is supported via a ``build_ext`` command override; it will only affect
.. code-block:: python
from glob import glob
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension, build_ext
ext_modules = [
Pybind11Extension(
"python_example",
["src/main.cpp"],
sorted(glob("src/*.cpp")),
),
]
setup(
...,
cmdclass={"build_ext": build_ext},
ext_modules=ext_modules
)
setup(..., cmdclass={"build_ext": build_ext}, ext_modules=ext_modules)
If you have single-file extension modules that are directly stored in the
Python source tree (``foo.cpp`` in the same directory as where a ``foo.py``
would be located), you can also generate ``Pybind11Extensions`` using
``setup_helpers.intree_extensions``: ``intree_extensions(["path/to/foo.cpp",
...])`` returns a list of ``Pybind11Extensions`` which can be passed to
``ext_modules``, possibly after further customizing their attributes
(``libraries``, ``include_dirs``, etc.). By doing so, a ``foo.*.so`` extension
module will be generated and made available upon installation.
``intree_extensions`` will automatically detect if you are using a ``src``-style
layout (as long as no namespace packages are involved), but you can also
explicitly pass ``package_dir`` to it (as in ``setuptools.setup``).
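As a rough sketch of that workflow (the ``pkg/`` layout and the extra include
directory below are hypothetical, not part of the helpers):

.. code-block:: python

from glob import glob

from setuptools import setup
from pybind11.setup_helpers import build_ext, intree_extensions

# One Pybind11Extension per in-tree .cpp file, named after its package path
ext_modules = intree_extensions(sorted(glob("pkg/*.cpp")))

# Attributes can still be customized before handing the list to setuptools
for ext in ext_modules:
    ext.include_dirs.append("third_party/include")  # hypothetical extra include path

setup(..., cmdclass={"build_ext": build_ext}, ext_modules=ext_modules)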
Since pybind11 does not require NumPy when building, a light-weight replacement
for NumPy's parallel compilation distutils tool is included. Use it like this:
.. code-block:: python
from pybind11.setup_helpers import ParallelCompile
# Optional multithreaded build
ParallelCompile("NPY_NUM_BUILD_JOBS").install()
setup(...)
The argument is the name of an environment variable to control the number of
threads, such as ``NPY_NUM_BUILD_JOBS`` (as used by NumPy), though you can set
something different if you want; ``CMAKE_BUILD_PARALLEL_LEVEL`` is another choice
a user might expect. You can also pass ``default=N`` to set the default number
of threads (0 will take the number of threads available) and ``max=N``, the
maximum number of threads; if you have a large extension you may want to set
this to a memory-dependent number.
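For instance, a sketch combining those options (the particular numbers are only
illustrative):

.. code-block:: python

from pybind11.setup_helpers import ParallelCompile

# Respect CMAKE_BUILD_PARALLEL_LEVEL if the user sets it; otherwise default to
# 4 threads, and never use more than 8.
ParallelCompile("CMAKE_BUILD_PARALLEL_LEVEL", default=4, max=8).install()

setup(...)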
If you are developing rapidly and have a lot of C++ files, you may want to
avoid rebuilding files that have not changed. For simple cases where you are
using ``pip install -e .`` and do not have local headers, you can skip the
rebuild if an object file is newer than its source (headers are not checked!)
with the following:
.. code-block:: python
from pybind11.setup_helpers import ParallelCompile, naive_recompile
ParallelCompile("NPY_NUM_BUILD_JOBS", needs_recompile=naive_recompile).install()
If you have a more complex build, you can implement a smarter function and pass
it to ``needs_recompile``, or you can use [Ccache]_ instead. ``CXX="ccache g++"
pip install -e .`` would be the way to use it with GCC, for example. Unlike the
simple solution, this works even when not compiling in editable mode, but
it does require Ccache to be installed.
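For example, a sketch of such a smarter check, assuming (as with
``naive_recompile``) that the callable receives the object-file and source-file
paths and returns ``True`` when a rebuild is needed; the generated header below
is purely hypothetical:

.. code-block:: python

import os

from pybind11.setup_helpers import ParallelCompile

GENERATED_HEADER = "src/bindings_generated.h"  # hypothetical extra dependency

def needs_recompile(obj, src):
    # Rebuild if the object file is missing, or older than either its source
    # or the generated header.
    if not os.path.exists(obj):
        return True
    obj_mtime = os.stat(obj).st_mtime
    return obj_mtime < os.stat(src).st_mtime or obj_mtime < os.stat(GENERATED_HEADER).st_mtime

ParallelCompile("NPY_NUM_BUILD_JOBS", needs_recompile=needs_recompile).install()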
Keep in mind that Pip will not even attempt to rebuild if it thinks it has
already built a copy of your code, which it deduces from the version number.
One way to avoid this is to use [setuptools_scm]_, which will generate a
version number that includes the number of commits since your last tag and a
hash for a dirty directory. Another way to force a rebuild is purge your cache
or use Pip's ``--no-cache-dir`` option.
.. [Ccache] https://ccache.dev
.. [setuptools_scm] https://github.com/pypa/setuptools_scm
.. _setup_helpers-pep518:
@ -85,7 +143,7 @@ Your ``pyproject.toml`` file will likely look something like this:
.. code-block:: toml
[build-system]
requires = ["setuptools", "wheel", "pybind11==2.6.0"]
requires = ["setuptools>=42", "wheel", "pybind11~=2.6.1"]
build-backend = "setuptools.build_meta"
.. note::
@ -96,10 +154,12 @@ Your ``pyproject.toml`` file will likely look something like this:
in Python) using something like `cibuildwheel`_, remember that ``setup.py``
and ``pyproject.toml`` are not even contained in the wheel, so this high
Pip requirement is only for source builds, and will not affect users of
your binary wheels.
your binary wheels. If you are building SDists and wheels, then
`pypa-build`_ is the recommended official tool.
.. _PEP 517: https://www.python.org/dev/peps/pep-0517/
.. _cibuildwheel: https://cibuildwheel.readthedocs.io
.. _pypa-build: https://pypa-build.readthedocs.io/en/latest/
.. _setup_helpers-setup_requires:
@ -140,6 +200,23 @@ this, you will need to import from a local file in ``setup.py`` and ensure the
helper file is part of your MANIFEST.
Closely related, if you include pybind11 as a subproject, you can run the
``setup_helpers.py`` in place. If loaded correctly, this should even pick up
the correct include for pybind11, though you can turn it off as shown above if
you want to input it manually.
Suggested usage if you have pybind11 as a submodule in ``extern/pybind11``:
.. code-block:: python
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(DIR, "extern", "pybind11"))
from pybind11.setup_helpers import Pybind11Extension # noqa: E402
del sys.path[-1]
.. versionchanged:: 2.6
Added ``setup_helpers`` file.
@ -184,6 +261,8 @@ PyPI integration, can be found in the [cmake_example]_ repository.
.. versionchanged:: 2.6
CMake 3.4+ is required.
Further information can be found at :doc:`cmake/index`.
pybind11_add_module
-------------------
@ -224,8 +303,15 @@ As stated above, LTO is enabled by default. Some newer compilers also support
different flavors of LTO such as `ThinLTO`_. Setting ``THIN_LTO`` will cause
the function to prefer this flavor if available. The function falls back to
regular LTO if ``-flto=thin`` is not available. If
``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set (either ON or OFF), then that
will be respected instead of the built-in flag search.
``CMAKE_INTERPROCEDURAL_OPTIMIZATION`` is set (either ``ON`` or ``OFF``), then
that will be respected instead of the built-in flag search.
.. note::
If you want to set the property form on targets or the
``CMAKE_INTERPROCEDURAL_OPTIMIZATION_<CONFIG>`` versions of this, you should
still use ``set(CMAKE_INTERPROCEDURAL_OPTIMIZATION OFF)`` (otherwise a
no-op) to disable pybind11's ipo flags.
The ``OPT_SIZE`` flag enables size-based optimization equivalent to the
standard ``/Os`` or ``-Os`` compiler flags and the ``MinSizeRel`` build type,
@ -252,10 +338,9 @@ standard explicitly with
.. code-block:: cmake
set(CMAKE_CXX_STANDARD 14) # or 11, 14, 17, 20
set(CMAKE_CXX_STANDARD 14 CACHE STRING "C++ version selection") # or 11, 14, 17, 20
set(CMAKE_CXX_STANDARD_REQUIRED ON) # optional, ensure standard is supported
set(CMAKE_CXX_EXTENSIONS OFF) # optional, keep compiler extensionsn off
set(CMAKE_CXX_EXTENSIONS OFF) # optional, keep compiler extensions off
The variables can also be set when calling CMake from the command line using
the ``-D<variable>=<value>`` flag. You can also manually set ``CXX_STANDARD``
@ -325,13 +410,14 @@ can refer to the same [cmake_example]_ repository for a full sample project
FindPython mode
---------------
CMake 3.12+ (3.15+ recommended) added a new module called FindPython that had a
highly improved search algorithm and modern targets and tools. If you use
FindPython, pybind11 will detect this and use the existing targets instead:
CMake 3.12+ (3.15+ recommended, 3.18.2+ ideal) added a new module called
FindPython that had a highly improved search algorithm and modern targets
and tools. If you use FindPython, pybind11 will detect this and use the
existing targets instead:
.. code-block:: cmake
cmake_minumum_required(VERSION 3.15...3.18)
cmake_minimum_required(VERSION 3.15...3.19)
project(example LANGUAGES CXX)
find_package(Python COMPONENTS Interpreter Development REQUIRED)
@ -357,6 +443,14 @@ setting ``Python_ROOT_DIR`` may be the most common one (though with
virtualenv/venv support, and Conda support, this tends to find the correct
Python version more often than the old system did).
.. warning::
When the Python libraries (i.e. ``libpythonXX.a`` and ``libpythonXX.so``
on Unix) are not available, as is the case on a manylinux image, the
``Development`` component will not be resolved by ``FindPython``. When not
using the embedding functionality, CMake 3.18+ allows you to specify
``Development.Module`` instead of ``Development`` to resolve this issue.
.. versionadded:: 2.6
Advanced: interface library targets
@ -428,7 +522,7 @@ Instead of setting properties, you can set ``CMAKE_*`` variables to initialize t
compiler flags are provided to ensure high quality code generation. In
contrast to the ``pybind11_add_module()`` command, the CMake interface
provides a *composable* set of targets to ensure that you retain flexibility.
It can be expecially important to provide or set these properties; the
It can be especially important to provide or set these properties; the
:ref:`FAQ <faq:symhidden>` contains an explanation on why these are needed.
.. versionadded:: 2.6
@ -481,7 +575,7 @@ On Linux, you can compile an example such as the one given in
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
$ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
The flags given here assume that you're using Python 3. For Python 2, just
change the executable appropriately (to ``python`` or ``python2``).
@ -493,7 +587,7 @@ using ``pip`` or ``conda``. If it hasn't, you can also manually specify
``python3-config --includes``.
Note that Python 2.7 modules don't use a special suffix, so you should simply
use ``example.so`` instead of ``example`python3-config --extension-suffix```.
use ``example.so`` instead of ``example$(python3-config --extension-suffix)``.
Besides, the ``--extension-suffix`` option may or may not be available, depending
on the distribution; in the latter case, the module extension can be manually
set to ``.so``.
@ -504,7 +598,7 @@ building the module:
.. code-block:: bash
$ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup `python3 -m pybind11 --includes` example.cpp -o example`python3-config --extension-suffix`
$ c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup $(python3 -m pybind11 --includes) example.cpp -o example$(python3-config --extension-suffix)
In general, it is advisable to include several additional build parameters
that can considerably reduce the size of the created binary. Refer to section
@ -523,23 +617,11 @@ build system that works on all platforms including Windows.
contains one (which will lead to a segfault).
Building with vcpkg
Building with Bazel
===================
You can download and install pybind11 using the Microsoft `vcpkg
<https://github.com/Microsoft/vcpkg/>`_ dependency manager:
.. code-block:: bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
./vcpkg integrate install
vcpkg install pybind11
The pybind11 port in vcpkg is kept up to date by Microsoft team members and
community contributors. If the version is out of date, please `create an issue
or pull request <https://github.com/Microsoft/vcpkg/>`_ on the vcpkg
repository.
You can build with the Bazel build system using the `pybind11_bazel
<https://github.com/pybind/pybind11_bazel>`_ repository.
Generating binding code automatically
=====================================

View File

@ -13,57 +13,68 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import re
import subprocess
import sys
from pathlib import Path
DIR = Path(__file__).parent.resolve()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
extensions = [
"breathe",
"sphinxcontrib.rsvgconverter",
"sphinxcontrib.moderncmakedomain",
]
breathe_projects = {'pybind11': '.build/doxygenxml/'}
breathe_default_project = 'pybind11'
breathe_domain_by_extension = {'h': 'cpp'}
breathe_projects = {"pybind11": ".build/doxygenxml/"}
breathe_default_project = "pybind11"
breathe_domain_by_extension = {"h": "cpp"}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = 'pybind11'
copyright = '2017, Wenzel Jakob'
author = 'Wenzel Jakob'
project = "pybind11"
copyright = "2017, Wenzel Jakob"
author = "Wenzel Jakob"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5'
# Read the listed version
with open("../pybind11/_version.py") as f:
code = compile(f.read(), "../pybind11/_version.py", "exec")
loc = {}
exec(code, loc)
# The full version, including alpha/beta/rc tags.
release = '2.5.dev1'
version = loc["__version__"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -74,37 +85,37 @@ language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', 'release.rst']
exclude_patterns = [".build", "release.rst"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'monokai'
# pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
@ -115,141 +126,150 @@ todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
html_context = {"css_files": ["_static/theme_overrides.css"]}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
"css_files": [
"//media.readthedocs.org/css/sphinx_rtd_theme.css",
"//media.readthedocs.org/css/readthedocs-doc-embed.css",
"_static/theme_overrides.css",
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# "<project> v<version> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybind11doc'
htmlhelp_basename = "pybind11doc"
# -- Options for LaTeX output ---------------------------------------------
latex_engine = "pdflatex"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
# remove blank pages (between the title page and the TOC, etc.)
"classoptions": ",openany,oneside",
"preamble": r"""
\usepackage{fontawesome}
\usepackage{textgreek}
\DeclareUnicodeCharacter{00A0}{}
\DeclareUnicodeCharacter{2194}{\faArrowsH}
\DeclareUnicodeCharacter{1F382}{\faBirthdayCake}
\DeclareUnicodeCharacter{1F355}{\faAdjust}
\DeclareUnicodeCharacter{0301}{'}
\DeclareUnicodeCharacter{03C0}{\textpi}
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'\DeclareUnicodeCharacter{00A0}{}',
# Latex figure (float) alignment
#'figure_align': 'htbp',
""",
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybind11.tex', 'pybind11 Documentation',
'Wenzel Jakob', 'manual'),
(master_doc, "pybind11.tex", "pybind11 Documentation", "Wenzel Jakob", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
@ -258,32 +278,29 @@ latex_documents = [
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybind11', 'pybind11 Documentation',
[author], 1)
]
man_pages = [(master_doc, "pybind11", "pybind11 Documentation", [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@ -292,41 +309,73 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybind11', 'pybind11 Documentation',
author, 'pybind11', 'One line description of project.',
'Miscellaneous'),
(
master_doc,
"pybind11",
"pybind11 Documentation",
author,
"pybind11",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False
primary_domain = 'cpp'
highlight_language = 'cpp'
primary_domain = "cpp"
highlight_language = "cpp"
def generate_doxygen_xml(app):
build_dir = os.path.join(app.confdir, '.build')
build_dir = os.path.join(app.confdir, ".build")
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
subprocess.call(['doxygen', '--version'])
retcode = subprocess.call(['doxygen'], cwd=app.confdir)
subprocess.call(["doxygen", "--version"])
retcode = subprocess.call(["doxygen"], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
def prepare(app):
with open(DIR.parent / "README.rst") as f:
contents = f.read()
if app.builder.name == "latex":
# Remove badges and stuff from start
contents = contents[contents.find(r".. start") :]
# Filter out section titles for index.rst for LaTeX
contents = re.sub(r"^(.*)\n[-~]{3,}$", r"**\1**", contents, flags=re.MULTILINE)
with open(DIR / "readme.rst", "w") as f:
f.write(contents)
def clean_up(app, exception):
(DIR / "readme.rst").unlink()
def setup(app):
"""Add hook for building doxygen xml when needed"""
# Add hook for building doxygen xml when needed
app.connect("builder-inited", generate_doxygen_xml)
# Copy the readme in
app.connect("builder-inited", prepare)
# Clean up the generated readme
app.connect("build-finished", clean_up)

View File

@ -5,7 +5,7 @@ Frequently asked questions
===========================================================
1. Make sure that the name specified in PYBIND11_MODULE is identical to the
filename of the extension library (without suffixes such as .so)
filename of the extension library (without suffixes such as ``.so``).
2. If the above did not fix the issue, you are likely using an incompatible
version of Python (for instance, the extension library was compiled against
@ -27,18 +27,6 @@ The Python interpreter immediately crashes when importing my module
See the first answer.
CMake doesn't detect the right Python version
=============================================
The CMake-based build system will try to automatically detect the installed
version of Python and link against that. When this fails, or when there are
multiple versions of Python and it finds the wrong one, delete
``CMakeCache.txt`` and then invoke CMake as follows:
.. code-block:: bash
cmake -DPYTHON_EXECUTABLE:FILEPATH=<path-to-python-executable> .
.. _faq_reference_arguments:
Limitations involving reference arguments
@ -66,7 +54,7 @@ provided by the caller -- in fact, it does nothing at all.
.. code-block:: python
def increment(i):
i += 1 # nope..
i += 1 # nope..
pybind11 is also affected by such language-level conventions, which means that
binding ``increment`` or ``increment_ptr`` will also create Python functions
@ -100,8 +88,8 @@ following example:
.. code-block:: cpp
void init_ex1(py::module &);
void init_ex2(py::module &);
void init_ex1(py::module_ &);
void init_ex2(py::module_ &);
/* ... */
PYBIND11_MODULE(example, m) {
@ -114,7 +102,7 @@ following example:
.. code-block:: cpp
void init_ex1(py::module &m) {
void init_ex1(py::module_ &m) {
m.def("add", [](int a, int b) { return a + b; });
}
@ -122,7 +110,7 @@ following example:
.. code-block:: cpp
void init_ex2(py::module &m) {
void init_ex2(py::module_ &m) {
m.def("sub", [](int a, int b) { return a - b; });
}
@ -181,8 +169,8 @@ can be changed, but even if it isn't it is not always enough to guarantee
complete independence of the symbols involved when not using
``-fvisibility=hidden``.
Additionally, ``-fvisiblity=hidden`` can deliver considerably binary size
savings. (See the following section for more details).
Additionally, ``-fvisibility=hidden`` can deliver considerable binary size
savings. (See the following section for more details.)
.. _`faq:symhidden`:
@ -192,7 +180,7 @@ How can I create smaller binaries?
To do its job, pybind11 extensively relies on a programming technique known as
*template metaprogramming*, which is a way of performing computation at compile
time using type information. Template metaprogamming usually instantiates code
time using type information. Template metaprogramming usually instantiates code
involving significant numbers of deeply nested types that are either completely
removed or reduced to just a few instructions during the compiler's optimization
phase. However, due to the nested nature of these types, the resulting symbol
@ -275,17 +263,34 @@ been received, you must either explicitly interrupt execution by throwing
});
}
CMake doesn't detect the right Python version
=============================================
The CMake-based build system will try to automatically detect the installed
version of Python and link against that. When this fails, or when there are
multiple versions of Python and it finds the wrong one, delete
``CMakeCache.txt`` and then add ``-DPYTHON_EXECUTABLE=$(which python)`` to your
CMake configure line. (Replace ``$(which python)`` with a path to Python if
you prefer.)
You can alternatively try ``-DPYBIND11_FINDPYTHON=ON``, which will activate the
new CMake FindPython support instead of pybind11's custom search. Requires
CMake 3.12+, and 3.15+ or 3.18.2+ are even better. You can set this in your
``CMakeLists.txt`` before adding or finding pybind11, as well.
Inconsistent detection of Python version in CMake and pybind11
==============================================================
The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` provided by CMake
for Python version detection are not used by pybind11 due to unreliability and limitations that make
them unsuitable for pybind11's needs. Instead pybind provides its own, more reliable Python detection
CMake code. Conflicts can arise, however, when using pybind11 in a project that *also* uses the CMake
Python detection in a system with several Python versions installed.
The functions ``find_package(PythonInterp)`` and ``find_package(PythonLibs)``
provided by CMake for Python version detection are modified by pybind11 due to
unreliability and limitations that make them unsuitable for pybind11's needs.
Instead pybind11 provides its own, more reliable Python detection CMake code.
Conflicts can arise, however, when using pybind11 in a project that *also* uses
the CMake Python detection in a system with several Python versions installed.
This difference may cause inconsistencies and errors if *both* mechanisms are used in the same project. Consider the following
CMake code executed in a system with Python 2.7 and 3.x installed:
This difference may cause inconsistencies and errors if *both* mechanisms are
used in the same project. Consider the following CMake code executed in a
system with Python 2.7 and 3.x installed:
.. code-block:: cmake
@ -303,10 +308,24 @@ In contrast this code:
find_package(PythonInterp)
find_package(PythonLibs)
will detect Python 3.x for pybind11 and may crash on ``find_package(PythonLibs)`` afterwards.
will detect Python 3.x for pybind11 and may crash on
``find_package(PythonLibs)`` afterwards.
It is advised to avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)`` from CMake and rely
on pybind11 in detecting Python version. If this is not possible CMake machinery should be called *before* including pybind11.
There are three possible solutions:
1. Avoid using ``find_package(PythonInterp)`` and ``find_package(PythonLibs)``
from CMake and rely on pybind11 in detecting Python version. If this is not
possible, the CMake machinery should be called *before* including pybind11.
2. Set ``PYBIND11_FINDPYTHON`` to ``True`` or use ``find_package(Python
COMPONENTS Interpreter Development)`` on modern CMake (3.12+, 3.15+ better,
3.18.2+ best). Pybind11 in these cases uses the new CMake FindPython instead
of the old, deprecated search tools, and these modules are much better at
finding the correct Python.
3. Set ``PYBIND11_NOPYTHON`` to ``TRUE``. Pybind11 will not search for Python.
However, you will have to use the target-based system, and do more setup
yourself, because it does not know about or include things that depend on
Python, like ``pybind11_add_module``. This might be ideal for integrating
into an existing system, like scikit-build's Python helpers.
How to cite this project?
=========================

View File

@ -1,18 +1,17 @@
.. only: not latex
.. only:: latex
.. image:: pybind11-logo.png
Intro
=====
pybind11 --- Seamless operability between C++11 and Python
==========================================================
.. include:: readme.rst
.. only: not latex
.. only:: not latex
Contents:
.. toctree::
:maxdepth: 1
intro
changelog
upgrade
@ -20,6 +19,7 @@ pybind11 --- Seamless operability between C++11 and Python
:caption: The Basics
:maxdepth: 2
installing
basics
classes
compiling
@ -45,3 +45,4 @@ pybind11 --- Seamless operability between C++11 and Python
benchmark
limitations
reference
cmake/index

Some files were not shown because too many files have changed in this diff.