From 1ed651b1a2bc537d5c7f249f8d30bdd1e42ff8e5 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Fri, 18 Sep 2020 23:14:07 -0700 Subject: [PATCH 01/38] wrap MFAS --- gtsam/gtsam.i | 21 +++++++++++++++++++++ gtsam/sfm/MFAS.h | 2 ++ python/CMakeLists.txt | 7 +++++-- python/gtsam/specializations.h | 1 + 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i index 52f5901ee..649d80ae3 100644 --- a/gtsam/gtsam.i +++ b/gtsam/gtsam.i @@ -3105,6 +3105,27 @@ class ShonanAveraging3 { pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; }; +#include + +class KeyPairDoubleMap { + KeyPairDoubleMap(); + KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other); + + size_t size() const; + bool empty() const; + void clear(); + size_t at(pair) const; +}; + +class MFAS { + MFAS(const KeyVector*& nodes, + const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const gtsam::Unit3& projectionDirection); + + KeyPairDoubleMap computeOutlierWeights() const; + KeyVector computeOrdering() const; +}; + #include class TranslationRecovery { TranslationRecovery(const gtsam::BinaryMeasurementsUnit3 &relativeTranslations, diff --git a/gtsam/sfm/MFAS.h b/gtsam/sfm/MFAS.h index 929aa5ff0..67a7df219 100644 --- a/gtsam/sfm/MFAS.h +++ b/gtsam/sfm/MFAS.h @@ -108,4 +108,6 @@ class MFAS { std::map computeOutlierWeights() const; }; +typedef std::map, double> KeyPairDoubleMap; + } // namespace gtsam diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index bec02fb64..00b537340 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -37,7 +37,8 @@ set(ignore gtsam::Point2Vector gtsam::Pose3Vector gtsam::KeyVector - gtsam::BinaryMeasurementsUnit3) + gtsam::BinaryMeasurementsUnit3 + gtsam::KeyPairDoubleMap) pybind_wrap(gtsam_py # target ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header @@ -80,7 +81,9 @@ set(ignore gtsam::Pose3Vector gtsam::KeyVector gtsam::FixedLagSmootherKeyTimestampMapValue - gtsam::BinaryMeasurementsUnit3) + 
gtsam::BinaryMeasurementsUnit3 + gtsam::KeyPairDoubleMap) + pybind_wrap(gtsam_unstable_py # target ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header "gtsam_unstable.cpp" # generated_cpp diff --git a/python/gtsam/specializations.h b/python/gtsam/specializations.h index 3f6b8fa38..cacad874c 100644 --- a/python/gtsam/specializations.h +++ b/python/gtsam/specializations.h @@ -12,3 +12,4 @@ py::bind_vector py::bind_vector > >(m_, "BinaryMeasurementsUnit3"); py::bind_map(m_, "IndexPairSetMap"); py::bind_vector(m_, "IndexPairVector"); +py::bind_map(m_, "KeyPairDoubleMap"); From 6c14605ed0860f648ef73321a434033d98ed0796 Mon Sep 17 00:00:00 2001 From: Akshay Krishnan Date: Sat, 19 Sep 2020 08:36:49 +0000 Subject: [PATCH 02/38] changing to boost shared_ptr --- gtsam/gtsam.i | 8 ++++---- gtsam/sfm/MFAS.cpp | 4 +++- gtsam/sfm/MFAS.h | 8 +++++--- gtsam/sfm/tests/testMFAS.cpp | 10 +++++++--- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i index 649d80ae3..df486e19e 100644 --- a/gtsam/gtsam.i +++ b/gtsam/gtsam.i @@ -3114,16 +3114,16 @@ class KeyPairDoubleMap { size_t size() const; bool empty() const; void clear(); - size_t at(pair) const; + size_t at(const pair& keypair) const; }; class MFAS { - MFAS(const KeyVector*& nodes, + MFAS(const gtsam::KeyVector* nodes, const gtsam::BinaryMeasurementsUnit3& relativeTranslations, const gtsam::Unit3& projectionDirection); - KeyPairDoubleMap computeOutlierWeights() const; - KeyVector computeOrdering() const; + gtsam::KeyPairDoubleMap computeOutlierWeights() const; + gtsam::KeyVector computeOrdering() const; }; #include diff --git a/gtsam/sfm/MFAS.cpp b/gtsam/sfm/MFAS.cpp index 6dccc2dee..0a407785b 100644 --- a/gtsam/sfm/MFAS.cpp +++ b/gtsam/sfm/MFAS.cpp @@ -7,6 +7,8 @@ #include +#include + #include #include #include @@ -111,7 +113,7 @@ void removeNodeFromGraph(const Key node, graph.erase(node); } -MFAS::MFAS(const std::shared_ptr>& nodes, +MFAS::MFAS(const 
boost::shared_ptr> nodes, const TranslationEdges& relativeTranslations, const Unit3& projectionDirection) : nodes_(nodes) { diff --git a/gtsam/sfm/MFAS.h b/gtsam/sfm/MFAS.h index 67a7df219..ca85d3248 100644 --- a/gtsam/sfm/MFAS.h +++ b/gtsam/sfm/MFAS.h @@ -22,6 +22,8 @@ #include #include +#include + #include #include #include @@ -57,7 +59,7 @@ class MFAS { private: // pointer to nodes in the graph - const std::shared_ptr> nodes_; + const boost::shared_ptr> nodes_; // edges with a direction such that all weights are positive // i.e, edges that originally had negative weights are flipped @@ -74,7 +76,7 @@ class MFAS { * @param nodes: Nodes in the graph * @param edgeWeights: weights of edges in the graph */ - MFAS(const std::shared_ptr> &nodes, + MFAS(const boost::shared_ptr> nodes, const std::map &edgeWeights) : nodes_(nodes), edgeWeights_(edgeWeights) {} @@ -88,7 +90,7 @@ class MFAS { * @param relativeTranslations translation directions between the cameras * @param projectionDirection direction in which edges are to be projected */ - MFAS(const std::shared_ptr> &nodes, + MFAS(const boost::shared_ptr> nodes, const TranslationEdges &relativeTranslations, const Unit3 &projectionDirection); diff --git a/gtsam/sfm/tests/testMFAS.cpp b/gtsam/sfm/tests/testMFAS.cpp index 58ea4cc84..53526cce1 100644 --- a/gtsam/sfm/tests/testMFAS.cpp +++ b/gtsam/sfm/tests/testMFAS.cpp @@ -6,9 +6,13 @@ */ #include -#include + +#include +#include #include +#include + using namespace std; using namespace gtsam; @@ -46,7 +50,7 @@ map getEdgeWeights(const vector &edges, // test the ordering and the outlierWeights function using weights2 - outlier // edge is rejected when projected in a direction that gives weights2 TEST(MFAS, OrderingWeights2) { - MFAS mfas_obj(make_shared>(nodes), getEdgeWeights(edges, weights2)); + MFAS mfas_obj(boost::make_shared>(nodes), getEdgeWeights(edges, weights2)); vector ordered_nodes = mfas_obj.computeOrdering(); @@ -76,7 +80,7 @@ TEST(MFAS, OrderingWeights2) { // 
weights1 (outlier edge is accepted when projected in a direction that
// produces weights1)
TEST(MFAS, OrderingWeights1) {
-  MFAS mfas_obj(make_shared>(nodes), getEdgeWeights(edges, weights1));
+  MFAS mfas_obj(boost::make_shared>(nodes), getEdgeWeights(edges, weights1));

   vector ordered_nodes = mfas_obj.computeOrdering();

From 1f5c6b8b4b712250e5e7261ae7ba68273d734741 Mon Sep 17 00:00:00 2001
From: Akshay Krishnan
Date: Sun, 20 Sep 2020 20:33:37 +0000
Subject: [PATCH 03/38] remove unused ptr member in MFAS

---
 gtsam/gtsam.i | 4 ++--
 gtsam/sfm/MFAS.cpp | 6 ++----
 gtsam/sfm/MFAS.h | 24 +++++-------------------
 gtsam/sfm/tests/testMFAS.cpp | 7 ++-----
 4 files changed, 11 insertions(+), 30 deletions(-)

diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i
index df486e19e..3a7bebaba 100644
--- a/gtsam/gtsam.i
+++ b/gtsam/gtsam.i
@@ -2972,6 +2972,7 @@ class BinaryMeasurement {
   size_t key1() const;
   size_t key2() const;
   T measured() const;
+  gtsam::noiseModel::Base* noiseModel() const;
 };

 typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3;
@@ -3118,8 +3119,7 @@ class KeyPairDoubleMap {
 };

 class MFAS {
-  MFAS(const gtsam::KeyVector* nodes,
-       const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
+  MFAS(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
        const gtsam::Unit3& projectionDirection);

   gtsam::KeyPairDoubleMap computeOutlierWeights() const;
diff --git a/gtsam/sfm/MFAS.cpp b/gtsam/sfm/MFAS.cpp
index 0a407785b..bc66d0711 100644
--- a/gtsam/sfm/MFAS.cpp
+++ b/gtsam/sfm/MFAS.cpp
@@ -113,10 +113,8 @@ void removeNodeFromGraph(const Key node,
   graph.erase(node);
 }

-MFAS::MFAS(const boost::shared_ptr> nodes,
-           const TranslationEdges& relativeTranslations,
-           const Unit3& projectionDirection)
-    : nodes_(nodes) {
+MFAS::MFAS(const TranslationEdges& relativeTranslations,
+           const Unit3& projectionDirection) {
   // Iterate over edges, obtain weights by projecting
   // their relativeTranslations along the projection direction
   for (const auto& measurement : relativeTranslations) {
diff --git a/gtsam/sfm/MFAS.h b/gtsam/sfm/MFAS.h index ca85d3248..3b01122a9 100644 --- a/gtsam/sfm/MFAS.h +++ b/gtsam/sfm/MFAS.h @@ -22,8 +22,6 @@ #include #include -#include - #include #include #include @@ -58,40 +56,28 @@ class MFAS { using TranslationEdges = std::vector>; private: - // pointer to nodes in the graph - const boost::shared_ptr> nodes_; - // edges with a direction such that all weights are positive // i.e, edges that originally had negative weights are flipped std::map edgeWeights_; public: /** - * @brief Construct from the nodes in a graph and weighted directed edges + * @brief Construct from the weighted directed edges * between the nodes. Each node is identified by a Key. - * A shared pointer to the nodes is used as input parameter - * because, MFAS ordering is usually used to compute the ordering of a - * large graph that is already stored in memory. It is unnecessary to make a - * copy of the nodes in this class. - * @param nodes: Nodes in the graph * @param edgeWeights: weights of edges in the graph */ - MFAS(const boost::shared_ptr> nodes, - const std::map &edgeWeights) - : nodes_(nodes), edgeWeights_(edgeWeights) {} + MFAS(const std::map &edgeWeights) + : edgeWeights_(edgeWeights) {} /** * @brief Constructor to be used in the context of translation averaging. * Here, the nodes of the graph are cameras in 3D and the edges have a unit * translation direction between them. The weights of the edges is computed by * projecting them along a projection direction. 
- * @param nodes cameras in the epipolar graph (each camera is identified by a - * Key) * @param relativeTranslations translation directions between the cameras * @param projectionDirection direction in which edges are to be projected */ - MFAS(const boost::shared_ptr> nodes, - const TranslationEdges &relativeTranslations, + MFAS(const TranslationEdges &relativeTranslations, const Unit3 &projectionDirection); /** @@ -101,7 +87,7 @@ class MFAS { std::vector computeOrdering() const; /** - * @brief Computes the "outlier weights" of the graph. We define the outlier + * @brief Computes the outlier weights of the graph. We define the outlier * weight of a edge to be zero if the edge is an inlier and the magnitude of * its edgeWeight if it is an outlier. This function internally calls * computeOrdering and uses the obtained ordering to identify outlier edges. diff --git a/gtsam/sfm/tests/testMFAS.cpp b/gtsam/sfm/tests/testMFAS.cpp index 53526cce1..b2daf0d2e 100644 --- a/gtsam/sfm/tests/testMFAS.cpp +++ b/gtsam/sfm/tests/testMFAS.cpp @@ -7,8 +7,6 @@ #include -#include -#include #include #include @@ -43,14 +41,13 @@ map getEdgeWeights(const vector &edges, for (size_t i = 0; i < edges.size(); i++) { edgeWeights[edges[i]] = weights[i]; } - cout << "returning edge weights " << edgeWeights.size() << endl; return edgeWeights; } // test the ordering and the outlierWeights function using weights2 - outlier // edge is rejected when projected in a direction that gives weights2 TEST(MFAS, OrderingWeights2) { - MFAS mfas_obj(boost::make_shared>(nodes), getEdgeWeights(edges, weights2)); + MFAS mfas_obj(getEdgeWeights(edges, weights2)); vector ordered_nodes = mfas_obj.computeOrdering(); @@ -80,7 +77,7 @@ TEST(MFAS, OrderingWeights2) { // weights1 (outlier edge is accepted when projected in a direction that // produces weights1) TEST(MFAS, OrderingWeights1) { - MFAS mfas_obj(boost::make_shared>(nodes), getEdgeWeights(edges, weights1)); + MFAS mfas_obj(getEdgeWeights(edges, weights1)); 
vector ordered_nodes = mfas_obj.computeOrdering(); From 565467f2ff48dcdd9a53f4e93c664ac7fa51b4b3 Mon Sep 17 00:00:00 2001 From: Akshay Krishnan Date: Sun, 20 Sep 2020 20:34:10 +0000 Subject: [PATCH 04/38] translation averaging example --- .../examples/TranslationAveragingExample.py | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 python/gtsam/examples/TranslationAveragingExample.py diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py new file mode 100644 index 000000000..09d261d97 --- /dev/null +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -0,0 +1,93 @@ +from collections import Counter +import functools +import operator + +import numpy as np + +import gtsam +from gtsam.examples import SFMdata + +max_1dsfm_projection_directions = 50 +outlier_weight_threshold = 0.1 + +def get_data(): + """"Returns data from SfMData.createPoses(). This contains the global rotations and the unit translations directions.""" + # Using toy dataset in SfMdata for example. + poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) + rotations = gtsam.Values() + translation_directions = [] + for i in range(0, len(poses) - 2): + # Add the rotation + rotations.insert(i, poses[i].rotation()) + # Create unit translation measurements with next two poses + for j in range(i+1, i+3): + i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate(poses[j].translation() - poses[i].translation())) + translation_directions.append(gtsam.BinaryMeasurementUnit3( + i, j, i_Z_j, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) + # Add the last two rotations. 
+ rotations.insert(len(poses) - 1, poses[-1].rotation()) + rotations.insert(len(poses) - 2, poses[-2].rotation()) + return (rotations, translation_directions) + + +def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3, + rotations: gtsam.Values): + """Estimate poses given normalized translation directions and rotations between nodes. + + Arguments: + measurements - List of translation direction from the first node to the second node in the coordinate frame of the first node. + rotations {Values} -- Estimated rotations + + Returns: + Values -- Estimated poses. + """ + + # Convert the translation directions to global frame using the rotations. + w_measurements = gtsam.BinaryMeasurementsUnit3() + for measurement in measurements: + w_measurements.append(gtsam.BinaryMeasurementUnit3(measurement.key1(), measurement.key2( + ), gtsam.Unit3(rotations.atRot3(measurement.key1()).rotate(measurement.measured().point3())), measurement.noiseModel())) + + # Indices of measurements that are to be used as projection directions. These are randomly chosen. + indices = np.random.choice(len(w_measurements), min( + max_1dsfm_projection_directions, len(w_measurements)), replace=False) + # Sample projection directions from the measurements. + projection_directions = [w_measurements[idx].measured() for idx in indices] + + outlier_weights = [] + # Find the outlier weights for each direction using MFAS. + for direction in projection_directions: + algorithm = gtsam.MFAS(w_measurements, direction) + outlier_weights.append(algorithm.computeOutlierWeights()) + + # Compute average of outlier weights. + avg_outlier_weights = {} + for outlier_weight_dict in outlier_weights: + for k, v in outlier_weight_dict.items(): + if k in avg_outlier_weights: + avg_outlier_weights[k] += v/len(outlier_weights) + else: + avg_outlier_weights[k] = v/len(outlier_weights) + + # Remove measurements that have weight greater than threshold. 
+ inlier_measurements = gtsam.BinaryMeasurementsUnit3() + [inlier_measurements.append(m) for m in w_measurements if avg_outlier_weights[(m.key1(), m.key2())] < outlier_weight_threshold] + + # Run the optimizer to obtain translations for normalized directions. + translations = gtsam.TranslationRecovery(inlier_measurements).run() + + poses = gtsam.Values() + for key in rotations.keys(): + poses.insert(key, gtsam.Pose3( + rotations.atRot3(key), translations.atPoint3(key))) + return poses + +def main(): + rotations, translation_directions = get_data() + poses = estimate_poses_given_rot(translation_directions, rotations) + print("**** Translation averaging output ****") + print(poses) + print("**************************************") + +if __name__ == '__main__': + main() From 4b06616dfedcd2c2b850cf865c4c8118e81681c3 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Mon, 21 Sep 2020 20:40:43 -0700 Subject: [PATCH 05/38] adding documentation for example --- .../examples/TranslationAveragingExample.py | 49 +++++++++++++------ 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 09d261d97..4e3c7467a 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -1,17 +1,27 @@ -from collections import Counter -import functools -import operator +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +This example shows how 1dsfm uses outlier rejection (MFAS) and optimization (translation recovery) +together for estimating global translations from relative translation directions and global rotations. +The purpose of this example is to illustrate the connection between these two classes using a small SfM dataset. 
+ +Author: Akshay Krishnan +Date: September 2020 +""" import numpy as np import gtsam from gtsam.examples import SFMdata -max_1dsfm_projection_directions = 50 -outlier_weight_threshold = 0.1 def get_data(): - """"Returns data from SfMData.createPoses(). This contains the global rotations and the unit translations directions.""" + """"Returns data from SfMData.createPoses(). This contains global rotations and unit translations directions.""" # Using toy dataset in SfMdata for example. poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) rotations = gtsam.Values() @@ -20,8 +30,9 @@ def get_data(): # Add the rotation rotations.insert(i, poses[i].rotation()) # Create unit translation measurements with next two poses - for j in range(i+1, i+3): - i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate(poses[j].translation() - poses[i].translation())) + for j in range(i + 1, i + 3): + i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate( + poses[j].translation() - poses[i].translation())) translation_directions.append(gtsam.BinaryMeasurementUnit3( i, j, i_Z_j, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) # Add the last two rotations. @@ -35,25 +46,30 @@ def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3, """Estimate poses given normalized translation directions and rotations between nodes. Arguments: - measurements - List of translation direction from the first node to the second node in the coordinate frame of the first node. + measurements {BinaryMeasurementsUnit3}- List of translation direction from the first node to + the second node in the coordinate frame of the first node. rotations {Values} -- Estimated rotations Returns: Values -- Estimated poses. """ + # Some hyperparameters. + max_1dsfm_projection_directions = 50 + outlier_weight_threshold = 0.1 + # Convert the translation directions to global frame using the rotations. 
w_measurements = gtsam.BinaryMeasurementsUnit3() for measurement in measurements: - w_measurements.append(gtsam.BinaryMeasurementUnit3(measurement.key1(), measurement.key2( - ), gtsam.Unit3(rotations.atRot3(measurement.key1()).rotate(measurement.measured().point3())), measurement.noiseModel())) + w_measurements.append(gtsam.BinaryMeasurementUnit3(measurement.key1(), measurement.key2(), gtsam.Unit3( + rotations.atRot3(measurement.key1()).rotate(measurement.measured().point3())), measurement.noiseModel())) # Indices of measurements that are to be used as projection directions. These are randomly chosen. indices = np.random.choice(len(w_measurements), min( max_1dsfm_projection_directions, len(w_measurements)), replace=False) # Sample projection directions from the measurements. projection_directions = [w_measurements[idx].measured() for idx in indices] - + outlier_weights = [] # Find the outlier weights for each direction using MFAS. for direction in projection_directions: @@ -65,13 +81,14 @@ def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3, for outlier_weight_dict in outlier_weights: for k, v in outlier_weight_dict.items(): if k in avg_outlier_weights: - avg_outlier_weights[k] += v/len(outlier_weights) + avg_outlier_weights[k] += v / len(outlier_weights) else: - avg_outlier_weights[k] = v/len(outlier_weights) + avg_outlier_weights[k] = v / len(outlier_weights) # Remove measurements that have weight greater than threshold. inlier_measurements = gtsam.BinaryMeasurementsUnit3() - [inlier_measurements.append(m) for m in w_measurements if avg_outlier_weights[(m.key1(), m.key2())] < outlier_weight_threshold] + [inlier_measurements.append(m) for m in w_measurements if avg_outlier_weights[( + m.key1(), m.key2())] < outlier_weight_threshold] # Run the optimizer to obtain translations for normalized directions. 
translations = gtsam.TranslationRecovery(inlier_measurements).run() @@ -82,6 +99,7 @@ def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3, rotations.atRot3(key), translations.atPoint3(key))) return poses + def main(): rotations, translation_directions = get_data() poses = estimate_poses_given_rot(translation_directions, rotations) @@ -89,5 +107,6 @@ def main(): print(poses) print("**************************************") + if __name__ == '__main__': main() From fbb26eea07ccc05c5496ea3ef019c6b024f4e626 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Thu, 24 Sep 2020 22:32:04 -0700 Subject: [PATCH 06/38] naming and other changes - review1 --- .../examples/TranslationAveragingExample.py | 78 +++++++++++-------- 1 file changed, 44 insertions(+), 34 deletions(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 4e3c7467a..c3dcf2ad2 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -14,22 +14,28 @@ Author: Akshay Krishnan Date: September 2020 """ +from collections import defaultdict +from typing import Tuple, List + import numpy as np import gtsam from gtsam.examples import SFMdata -def get_data(): +def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: """"Returns data from SfMData.createPoses(). This contains global rotations and unit translations directions.""" # Using toy dataset in SfMdata for example. poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) + # Rotations of the cameras in the world frame - wRc. rotations = gtsam.Values() + # Normalized translation directions for pairs of cameras - from first camera to second, + # in the coordinate frame of the first camera. translation_directions = [] for i in range(0, len(poses) - 2): - # Add the rotation + # Add the rotation. 
rotations.insert(i, poses[i].rotation()) - # Create unit translation measurements with next two poses + # Create unit translation measurements with next two poses. for j in range(i + 1, i + 3): i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate( poses[j].translation() - poses[i].translation())) @@ -41,68 +47,72 @@ def get_data(): return (rotations, translation_directions) -def estimate_poses_given_rot(measurements: gtsam.BinaryMeasurementsUnit3, - rotations: gtsam.Values): - """Estimate poses given normalized translation directions and rotations between nodes. +def estimate_poses(relative_translations: gtsam.BinaryMeasurementsUnit3, + rotations: gtsam.Values) -> gtsam.Values: + """Estimate poses given rotations normalized translation directions between cameras. - Arguments: - measurements {BinaryMeasurementsUnit3}- List of translation direction from the first node to - the second node in the coordinate frame of the first node. - rotations {Values} -- Estimated rotations + Args: + relative_translations -- List of normalized translation directions between camera pairs, each direction + is from the first camera to the second, in the frame of the first camera. + rotations -- Rotations of the cameras in the world frame. Returns: Values -- Estimated poses. """ - # Some hyperparameters. - max_1dsfm_projection_directions = 50 + # Some hyperparameters, values used from 1dsfm. + max_1dsfm_projection_directions = 48 outlier_weight_threshold = 0.1 # Convert the translation directions to global frame using the rotations. 
- w_measurements = gtsam.BinaryMeasurementsUnit3() - for measurement in measurements: - w_measurements.append(gtsam.BinaryMeasurementUnit3(measurement.key1(), measurement.key2(), gtsam.Unit3( - rotations.atRot3(measurement.key1()).rotate(measurement.measured().point3())), measurement.noiseModel())) + w_relative_translations = gtsam.BinaryMeasurementsUnit3() + for relative_translation in relative_translations: + w_relative_translation = gtsam.Unit3(rotations.atRot3(relative_translation.key1()) + .rotate(relative_translation.measured().point3())) + w_relative_translations.append(gtsam.BinaryMeasurementUnit3(relative_translation.key1(), + relative_translation.key2(), + w_relative_translation, + relative_translation.noiseModel())) # Indices of measurements that are to be used as projection directions. These are randomly chosen. - indices = np.random.choice(len(w_measurements), min( - max_1dsfm_projection_directions, len(w_measurements)), replace=False) + sampled_indices = np.random.choice(len(w_relative_translations), min( + max_1dsfm_projection_directions, len(w_relative_translations)), replace=False) # Sample projection directions from the measurements. - projection_directions = [w_measurements[idx].measured() for idx in indices] + projection_directions = [ + w_relative_translations[idx].measured() for idx in sampled_indices] outlier_weights = [] # Find the outlier weights for each direction using MFAS. for direction in projection_directions: - algorithm = gtsam.MFAS(w_measurements, direction) + algorithm = gtsam.MFAS(w_relative_translations, direction) outlier_weights.append(algorithm.computeOutlierWeights()) - # Compute average of outlier weights. - avg_outlier_weights = {} + # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys (camera IDs) to a weight, + # where weights are proportional to the probability of the edge being an outlier. 
+ avg_outlier_weights = defaultdict(lambda: 0.0) for outlier_weight_dict in outlier_weights: - for k, v in outlier_weight_dict.items(): - if k in avg_outlier_weights: - avg_outlier_weights[k] += v / len(outlier_weights) - else: - avg_outlier_weights[k] = v / len(outlier_weights) + for keypair, weight in outlier_weight_dict.items(): + avg_outlier_weights[keypair] += weight / len(outlier_weights) - # Remove measurements that have weight greater than threshold. - inlier_measurements = gtsam.BinaryMeasurementsUnit3() - [inlier_measurements.append(m) for m in w_measurements if avg_outlier_weights[( - m.key1(), m.key2())] < outlier_weight_threshold] + # Remove w_relative_tranlsations that have weight greater than threshold, these are outliers. + inlier_w_relative_translations = gtsam.BinaryMeasurementsUnit3() + [inlier_w_relative_translations.append(Z) for Z in w_relative_translations + if avg_outlier_weights[(Z.key1(), Z.key2())] < outlier_weight_threshold] # Run the optimizer to obtain translations for normalized directions. 
- translations = gtsam.TranslationRecovery(inlier_measurements).run() + w_translations = gtsam.TranslationRecovery( + inlier_w_relative_translations).run() poses = gtsam.Values() for key in rotations.keys(): poses.insert(key, gtsam.Pose3( - rotations.atRot3(key), translations.atPoint3(key))) + rotations.atRot3(key), w_translations.atPoint3(key))) return poses def main(): rotations, translation_directions = get_data() - poses = estimate_poses_given_rot(translation_directions, rotations) + poses = estimate_poses(translation_directions, rotations) print("**** Translation averaging output ****") print(poses) print("**************************************") From 98404ad27e583ef72fca03aa2f46f9f89fc31254 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Sun, 27 Sep 2020 18:55:14 -0700 Subject: [PATCH 07/38] updating defaultdict init --- python/gtsam/examples/TranslationAveragingExample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index c3dcf2ad2..0f1314645 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -89,7 +89,7 @@ def estimate_poses(relative_translations: gtsam.BinaryMeasurementsUnit3, # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys (camera IDs) to a weight, # where weights are proportional to the probability of the edge being an outlier. 
- avg_outlier_weights = defaultdict(lambda: 0.0) + avg_outlier_weights = defaultdict(float) for outlier_weight_dict in outlier_weights: for keypair, weight in outlier_weight_dict.items(): avg_outlier_weights[keypair] += weight / len(outlier_weights) From c0fb3a271be928dd48b21267ab764087c4b69a97 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 30 Sep 2020 15:38:08 -0400 Subject: [PATCH 08/38] Small formatting changes and removal of test header --- gtsam_unstable/linear/ActiveSetSolver-inl.h | 22 +++++++++------------ gtsam_unstable/linear/ActiveSetSolver.h | 4 ++-- gtsam_unstable/linear/LPInitSolver.h | 3 +-- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/gtsam_unstable/linear/ActiveSetSolver-inl.h b/gtsam_unstable/linear/ActiveSetSolver-inl.h index 602012090..12374ac76 100644 --- a/gtsam_unstable/linear/ActiveSetSolver-inl.h +++ b/gtsam_unstable/linear/ActiveSetSolver-inl.h @@ -149,7 +149,7 @@ Template JacobianFactor::shared_ptr This::createDualFactor( // to compute the least-square approximation of dual variables return boost::make_shared(Aterms, b); } else { - return boost::make_shared(); + return nullptr; } } @@ -165,14 +165,13 @@ Template JacobianFactor::shared_ptr This::createDualFactor( * if lambda = 0 you are on the constraint * if lambda > 0 you are violating the constraint. 
*/ -Template GaussianFactorGraph::shared_ptr This::buildDualGraph( +Template GaussianFactorGraph This::buildDualGraph( const InequalityFactorGraph& workingSet, const VectorValues& delta) const { - GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph()); + GaussianFactorGraph dualGraph; for (Key key : constrainedKeys_) { // Each constrained key becomes a factor in the dual graph - JacobianFactor::shared_ptr dualFactor = - createDualFactor(key, workingSet, delta); - if (!dualFactor->empty()) dualGraph->push_back(dualFactor); + auto dualFactor = createDualFactor(key, workingSet, delta); + if (dualFactor) dualGraph.push_back(dualFactor); } return dualGraph; } @@ -193,19 +192,16 @@ This::buildWorkingGraph(const InequalityFactorGraph& workingSet, Template typename This::State This::iterate( const typename This::State& state) const { // Algorithm 16.3 from Nocedal06book. - // Solve with the current working set eqn 16.39, but instead of solving for p - // solve for x - GaussianFactorGraph workingGraph = - buildWorkingGraph(state.workingSet, state.values); + // Solve with the current working set eqn 16.39, but solve for x not p + auto workingGraph = buildWorkingGraph(state.workingSet, state.values); VectorValues newValues = workingGraph.optimize(); // If we CAN'T move further // if p_k = 0 is the original condition, modified by Duy to say that the state // update is zero. if (newValues.equals(state.values, 1e-7)) { // Compute lambda from the dual graph - GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, - newValues); - VectorValues duals = dualGraph->optimize(); + auto dualGraph = buildDualGraph(state.workingSet, newValues); + VectorValues duals = dualGraph.optimize(); int leavingFactor = identifyLeavingConstraint(state.workingSet, duals); // If all inequality constraints are satisfied: We have the solution!! 
if (leavingFactor < 0) { diff --git a/gtsam_unstable/linear/ActiveSetSolver.h b/gtsam_unstable/linear/ActiveSetSolver.h index 8c3c5a7e5..318912cf3 100644 --- a/gtsam_unstable/linear/ActiveSetSolver.h +++ b/gtsam_unstable/linear/ActiveSetSolver.h @@ -154,8 +154,8 @@ protected: public: /// Just for testing... /// Builds a dual graph from the current working set. - GaussianFactorGraph::shared_ptr buildDualGraph( - const InequalityFactorGraph& workingSet, const VectorValues& delta) const; + GaussianFactorGraph buildDualGraph(const InequalityFactorGraph &workingSet, + const VectorValues &delta) const; /** * Build a working graph of cost, equality and active inequality constraints diff --git a/gtsam_unstable/linear/LPInitSolver.h b/gtsam_unstable/linear/LPInitSolver.h index 4eb672fbc..14e5fb000 100644 --- a/gtsam_unstable/linear/LPInitSolver.h +++ b/gtsam_unstable/linear/LPInitSolver.h @@ -21,7 +21,6 @@ #include #include -#include namespace gtsam { /** @@ -83,7 +82,7 @@ private: const InequalityFactorGraph& inequalities) const; // friend class for unit-testing private methods - FRIEND_TEST(LPInitSolver, initialization); + friend class LPInitSolverInitializationTest; }; } From c51264ac985954d26470bdc56d5805433f31ee3f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 30 Sep 2020 15:38:25 -0400 Subject: [PATCH 09/38] New method "add" as in GaussianFactorGraph --- gtsam_unstable/linear/EqualityFactorGraph.h | 5 +++++ gtsam_unstable/linear/InequalityFactorGraph.h | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/gtsam_unstable/linear/EqualityFactorGraph.h b/gtsam_unstable/linear/EqualityFactorGraph.h index 43befdbe0..fb3f4c076 100644 --- a/gtsam_unstable/linear/EqualityFactorGraph.h +++ b/gtsam_unstable/linear/EqualityFactorGraph.h @@ -31,6 +31,11 @@ class EqualityFactorGraph: public FactorGraph { public: typedef boost::shared_ptr shared_ptr; + /// Add a linear inequality, forwards arguments to LinearInequality. + template void add(Args &&... 
args) { + emplace_shared(std::forward(args)...); + } + /// Compute error of a guess. double error(const VectorValues& x) const { double total_error = 0.; diff --git a/gtsam_unstable/linear/InequalityFactorGraph.h b/gtsam_unstable/linear/InequalityFactorGraph.h index c87645697..d042b0436 100644 --- a/gtsam_unstable/linear/InequalityFactorGraph.h +++ b/gtsam_unstable/linear/InequalityFactorGraph.h @@ -47,6 +47,11 @@ public: return Base::equals(other, tol); } + /// Add a linear inequality, forwards arguments to LinearInequality. + template void add(Args &&... args) { + emplace_shared(std::forward(args)...); + } + /** * Compute error of a guess. * Infinity error if it violates an inequality; zero otherwise. */ From 6b739b17be03292c4d477432b1b2ff6d399fd4c2 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 30 Sep 2020 15:38:46 -0400 Subject: [PATCH 10/38] Re-formatting and using "add"/"auto" where we can. --- gtsam_unstable/linear/tests/testLPSolver.cpp | 138 ++++----- gtsam_unstable/linear/tests/testQPSolver.cpp | 307 +++++++++---------- 2 files changed, 199 insertions(+), 246 deletions(-) diff --git a/gtsam_unstable/linear/tests/testLPSolver.cpp b/gtsam_unstable/linear/tests/testLPSolver.cpp index a105a39f0..de9cd032a 100644 --- a/gtsam_unstable/linear/tests/testLPSolver.cpp +++ b/gtsam_unstable/linear/tests/testLPSolver.cpp @@ -16,20 +16,20 @@ * @author Duy-Nguyen Ta */ -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include -#include #include +#include using namespace std; using namespace gtsam; @@ -47,37 +47,27 @@ static const Vector kOne = Vector::Ones(1), kZero = Vector::Zero(1); */ LP simpleLP1() { LP lp; - lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2) - lp.inequalities.push_back( - LinearInequality(1, Vector2(-1, 0), 0, 1)); // x1 >= 0 - lp.inequalities.push_back( - LinearInequality(1, Vector2(0, -1), 
0, 2)); // x2 >= 0 - lp.inequalities.push_back( - LinearInequality(1, Vector2(1, 2), 4, 3)); // x1 + 2*x2 <= 4 - lp.inequalities.push_back( - LinearInequality(1, Vector2(4, 2), 12, 4)); // 4x1 + 2x2 <= 12 - lp.inequalities.push_back( - LinearInequality(1, Vector2(-1, 1), 1, 5)); // -x1 + x2 <= 1 + lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2) + lp.inequalities.add(1, Vector2(-1, 0), 0, 1); // x1 >= 0 + lp.inequalities.add(1, Vector2(0, -1), 0, 2); // x2 >= 0 + lp.inequalities.add(1, Vector2(1, 2), 4, 3); // x1 + 2*x2 <= 4 + lp.inequalities.add(1, Vector2(4, 2), 12, 4); // 4x1 + 2x2 <= 12 + lp.inequalities.add(1, Vector2(-1, 1), 1, 5); // -x1 + x2 <= 1 return lp; } /* ************************************************************************* */ namespace gtsam { -TEST(LPInitSolver, infinite_loop_single_var) { - LP initchecker; - initchecker.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-2, -1, -1), -2, 1)); //-2x-y-alpha <= -2 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-1, 2, -1), 6, 2)); // -x+2y-alpha <= 6 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-1, 0, -1), 0, 3)); // -x - alpha <= 0 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(1, 0, -1), 20, 4)); // x - alpha <= 20 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(0, -1, -1), 0, 5)); // -y - alpha <= 0 - LPSolver solver(initchecker); +TEST(LPInitSolver, InfiniteLoopSingleVar) { + LP lp; + lp.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha + lp.inequalities.add(1, Vector3(-2, -1, -1), -2, 1); //-2x-y-a <= -2 + lp.inequalities.add(1, Vector3(-1, 2, -1), 6, 2); // -x+2y-a <= 6 + lp.inequalities.add(1, Vector3(-1, 0, -1), 0, 3); // -x - a <= 0 + lp.inequalities.add(1, Vector3(1, 0, -1), 20, 4); // x - a <= 20 + lp.inequalities.add(1, Vector3(0, -1, -1), 0, 5); // -y - a <= 0 + LPSolver solver(lp); VectorValues starter; 
starter.insert(1, Vector3(0, 0, 2)); VectorValues results, duals; @@ -87,25 +77,23 @@ TEST(LPInitSolver, infinite_loop_single_var) { CHECK(assert_equal(results, expected, 1e-7)); } -TEST(LPInitSolver, infinite_loop_multi_var) { - LP initchecker; +TEST(LPInitSolver, InfiniteLoopMultiVar) { + LP lp; Key X = symbol('X', 1); Key Y = symbol('Y', 1); Key Z = symbol('Z', 1); - initchecker.cost = LinearCost(Z, kOne); // min alpha - initchecker.inequalities.push_back( - LinearInequality(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, - 1)); //-2x-y-alpha <= -2 - initchecker.inequalities.push_back( - LinearInequality(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, - 2)); // -x+2y-alpha <= 6 - initchecker.inequalities.push_back(LinearInequality( - X, -1.0 * kOne, Z, -1.0 * kOne, 0, 3)); // -x - alpha <= 0 - initchecker.inequalities.push_back(LinearInequality( - X, 1.0 * kOne, Z, -1.0 * kOne, 20, 4)); // x - alpha <= 20 - initchecker.inequalities.push_back(LinearInequality( - Y, -1.0 * kOne, Z, -1.0 * kOne, 0, 5)); // -y - alpha <= 0 - LPSolver solver(initchecker); + lp.cost = LinearCost(Z, kOne); // min alpha + lp.inequalities.add(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, + 1); //-2x-y-alpha <= -2 + lp.inequalities.add(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, + 2); // -x+2y-alpha <= 6 + lp.inequalities.add(X, -1.0 * kOne, Z, -1.0 * kOne, 0, + 3); // -x - alpha <= 0 + lp.inequalities.add(X, 1.0 * kOne, Z, -1.0 * kOne, 20, + 4); // x - alpha <= 20 + lp.inequalities.add(Y, -1.0 * kOne, Z, -1.0 * kOne, 0, + 5); // -y - alpha <= 0 + LPSolver solver(lp); VectorValues starter; starter.insert(X, kZero); starter.insert(Y, kZero); @@ -119,7 +107,7 @@ TEST(LPInitSolver, infinite_loop_multi_var) { CHECK(assert_equal(results, expected, 1e-7)); } -TEST(LPInitSolver, initialization) { +TEST(LPInitSolver, Initialization) { LP lp = simpleLP1(); LPInitSolver initSolver(lp); @@ -138,19 +126,19 @@ TEST(LPInitSolver, initialization) { LP::shared_ptr initLP = 
initSolver.buildInitialLP(yKey); LP expectedInitLP; expectedInitLP.cost = LinearCost(yKey, kOne); - expectedInitLP.inequalities.push_back(LinearInequality( - 1, Vector2(-1, 0), 2, Vector::Constant(1, -1), 0, 1)); // -x1 - y <= 0 - expectedInitLP.inequalities.push_back(LinearInequality( - 1, Vector2(0, -1), 2, Vector::Constant(1, -1), 0, 2)); // -x2 - y <= 0 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(1, 2), 2, Vector::Constant(1, -1), 4, - 3)); // x1 + 2*x2 - y <= 4 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(4, 2), 2, Vector::Constant(1, -1), 12, - 4)); // 4x1 + 2x2 - y <= 12 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), 1, - 5)); // -x1 + x2 - y <= 1 + expectedInitLP.inequalities.add(1, Vector2(-1, 0), 2, Vector::Constant(1, -1), + 0, 1); // -x1 - y <= 0 + expectedInitLP.inequalities.add(1, Vector2(0, -1), 2, Vector::Constant(1, -1), + 0, 2); // -x2 - y <= 0 + expectedInitLP.inequalities.add(1, Vector2(1, 2), 2, Vector::Constant(1, -1), + 4, + 3); // x1 + 2*x2 - y <= 4 + expectedInitLP.inequalities.add(1, Vector2(4, 2), 2, Vector::Constant(1, -1), + 12, + 4); // 4x1 + 2x2 - y <= 12 + expectedInitLP.inequalities.add(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), + 1, + 5); // -x1 + x2 - y <= 1 CHECK(assert_equal(expectedInitLP, *initLP, 1e-10)); LPSolver lpSolveInit(*initLP); VectorValues xy0(x0); @@ -164,7 +152,7 @@ TEST(LPInitSolver, initialization) { VectorValues x = initSolver.solve(); CHECK(lp.isFeasible(x)); } -} +} // namespace gtsam /* ************************************************************************* */ /** @@ -173,28 +161,24 @@ TEST(LPInitSolver, initialization) { * x - y = 5 * x + 2y = 6 */ -TEST(LPSolver, overConstrainedLinearSystem) { +TEST(LPSolver, OverConstrainedLinearSystem) { GaussianFactorGraph graph; Matrix A1 = Vector3(1, 1, 1); Matrix A2 = Vector3(1, -1, 2); Vector b = Vector3(1, 5, 6); - JacobianFactor factor(1, A1, 2, 
A2, b, noiseModel::Constrained::All(3)); - graph.push_back(factor); + graph.add(1, A1, 2, A2, b, noiseModel::Constrained::All(3)); VectorValues x = graph.optimize(); // This check confirms that gtsam linear constraint solver can't handle // over-constrained system - CHECK(factor.error(x) != 0.0); + CHECK(graph[0]->error(x) != 0.0); } TEST(LPSolver, overConstrainedLinearSystem2) { GaussianFactorGraph graph; - graph.emplace_shared(1, I_1x1, 2, I_1x1, kOne, - noiseModel::Constrained::All(1)); - graph.emplace_shared(1, I_1x1, 2, -I_1x1, 5 * kOne, - noiseModel::Constrained::All(1)); - graph.emplace_shared(1, I_1x1, 2, 2 * I_1x1, 6 * kOne, - noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, I_1x1, kOne, noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, -I_1x1, 5 * kOne, noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, 2 * I_1x1, 6 * kOne, noiseModel::Constrained::All(1)); VectorValues x = graph.optimize(); // This check confirms that gtsam linear constraint solver can't handle // over-constrained system @@ -202,7 +186,7 @@ TEST(LPSolver, overConstrainedLinearSystem2) { } /* ************************************************************************* */ -TEST(LPSolver, simpleTest1) { +TEST(LPSolver, SimpleTest1) { LP lp = simpleLP1(); LPSolver lpSolver(lp); VectorValues init; @@ -222,7 +206,7 @@ TEST(LPSolver, simpleTest1) { } /* ************************************************************************* */ -TEST(LPSolver, testWithoutInitialValues) { +TEST(LPSolver, TestWithoutInitialValues) { LP lp = simpleLP1(); LPSolver lpSolver(lp); VectorValues result, duals, expectedResult; diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index 2292c63d7..d3497d2a3 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -17,11 +17,11 @@ * @author Ivan Dario Jimenez */ +#include #include #include -#include #include -#include +#include using namespace 
std; using namespace gtsam; @@ -40,15 +40,15 @@ QP createTestCase() { // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 //TODO: THIS TEST MIGHT BE WRONG : the last parameter might be 5 instead of 10 because the form of the equation // Should be 0.5x'Gx + gx + f : Nocedal 449 - qp.cost.push_back( - HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1, 2.0 * I_1x1, - Z_1x1, 10.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1, + 2.0 * I_1x1, Z_1x1, 10.0)); // Inequality constraints - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 1)); // -x1 <= 0 - qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 2)); // -x2 <= 0 - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, 1.5, 3)); // x1 <= 3/2 + qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, + 0); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 + qp.inequalities.add(X(1), -I_1x1, 0, 1); // -x1 <= 0 + qp.inequalities.add(X(2), -I_1x1, 0, 2); // -x2 <= 0 + qp.inequalities.add(X(1), I_1x1, 1.5, 3); // x1 <= 3/2 return qp; } @@ -94,16 +94,15 @@ QP createEqualityConstrainedTest() { // Note the Hessian encodes: // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 - qp.cost.push_back( - HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1, 2.0 * I_1x1, Z_1x1, - 0.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1, + 2.0 * I_1x1, Z_1x1, 0.0)); // Equality constraints // x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector Matrix A1 = I_1x1; Matrix A2 = I_1x1; Vector b = -kOne; - qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0)); + qp.equalities.add(X(1), A1, X(2), A2, b, 0); return qp; } @@ -118,9 +117,8 @@ TEST(QPSolver, dual) { QPSolver solver(qp); - GaussianFactorGraph::shared_ptr 
dualGraph = solver.buildDualGraph( - qp.inequalities, initialValues); - VectorValues dual = dualGraph->optimize(); + auto dualGraph = solver.buildDualGraph(qp.inequalities, initialValues); + VectorValues dual = dualGraph.optimize(); VectorValues expectedDual; expectedDual.insert(0, (Vector(1) << 2.0).finished()); CHECK(assert_equal(expectedDual, dual, 1e-10)); @@ -135,19 +133,19 @@ TEST(QPSolver, indentifyActiveConstraints) { currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(2), Z_1x1); - InequalityFactorGraph workingSet = solver.identifyActiveConstraints( - qp.inequalities, currentSolution); + auto workingSet = + solver.identifyActiveConstraints(qp.inequalities, currentSolution); CHECK(!workingSet.at(0)->active()); // inactive - CHECK(workingSet.at(1)->active());// active - CHECK(workingSet.at(2)->active());// active - CHECK(!workingSet.at(3)->active());// inactive + CHECK(workingSet.at(1)->active()); // active + CHECK(workingSet.at(2)->active()); // active + CHECK(!workingSet.at(3)->active()); // inactive VectorValues solution = solver.buildWorkingGraph(workingSet).optimize(); - VectorValues expectedSolution; - expectedSolution.insert(X(1), kZero); - expectedSolution.insert(X(2), kZero); - CHECK(assert_equal(expectedSolution, solution, 1e-100)); + VectorValues expected; + expected.insert(X(1), kZero); + expected.insert(X(2), kZero); + CHECK(assert_equal(expected, solution, 1e-100)); } /* ************************************************************************* */ @@ -159,24 +157,24 @@ TEST(QPSolver, iterate) { currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(2), Z_1x1); - std::vector expectedSolutions(4), expectedDuals(4); - expectedSolutions[0].insert(X(1), kZero); - expectedSolutions[0].insert(X(2), kZero); + std::vector expecteds(4), expectedDuals(4); + expecteds[0].insert(X(1), kZero); + expecteds[0].insert(X(2), kZero); expectedDuals[0].insert(1, (Vector(1) << 3).finished()); expectedDuals[0].insert(2, kZero); - 
expectedSolutions[1].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[1].insert(X(2), kZero); + expecteds[1].insert(X(1), (Vector(1) << 1.5).finished()); + expecteds[1].insert(X(2), kZero); expectedDuals[1].insert(3, (Vector(1) << 1.5).finished()); - expectedSolutions[2].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[2].insert(X(2), (Vector(1) << 0.75).finished()); + expecteds[2].insert(X(1), (Vector(1) << 1.5).finished()); + expecteds[2].insert(X(2), (Vector(1) << 0.75).finished()); - expectedSolutions[3].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[3].insert(X(2), (Vector(1) << 0.5).finished()); + expecteds[3].insert(X(1), (Vector(1) << 1.5).finished()); + expecteds[3].insert(X(2), (Vector(1) << 0.5).finished()); - InequalityFactorGraph workingSet = solver.identifyActiveConstraints( - qp.inequalities, currentSolution); + auto workingSet = + solver.identifyActiveConstraints(qp.inequalities, currentSolution); QPSolver::State state(currentSolution, VectorValues(), workingSet, false, 100); @@ -188,12 +186,12 @@ TEST(QPSolver, iterate) { // Forst10book do not follow exactly what we implemented from Nocedal06book. // Specifically, we do not re-identify active constraints and // do not recompute dual variables after every step!!! 
-// CHECK(assert_equal(expectedSolutions[it], state.values, 1e-10)); -// CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); + // CHECK(assert_equal(expecteds[it], state.values, 1e-10)); + // CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); it++; } - CHECK(assert_equal(expectedSolutions[3], state.values, 1e-10)); + CHECK(assert_equal(expecteds[3], state.values, 1e-10)); } /* ************************************************************************* */ @@ -204,182 +202,161 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) { VectorValues initialValues; initialValues.insert(X(1), Z_1x1); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolution.insert(X(2), (Vector(1) << 0.5).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-100)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 1.5).finished()); + expected.insert(X(2), (Vector(1) << 0.5).finished()); + CHECK(assert_equal(expected, solution, 1e-100)); } pair testParser(QPSParser parser) { QP exampleqp = parser.Parse(); - QP expectedqp; + QP expected; Key X1(Symbol('X', 1)), X2(Symbol('X', 2)); // min f(x,y) = 4 + 1.5x -y + 0.58x^2 + 2xy + 2yx + 10y^2 - expectedqp.cost.push_back( - HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1, -1.5 * kOne, 10.0 * I_1x1, - 2.0 * kOne, 8.0)); - // 2x + y >= 2 - // -x + 2y <= 6 - expectedqp.inequalities.push_back( - LinearInequality(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0)); - expectedqp.inequalities.push_back( - LinearInequality(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1)); - // x<= 20 - expectedqp.inequalities.push_back(LinearInequality(X1, I_1x1, 20, 4)); - //x >= 0 - expectedqp.inequalities.push_back(LinearInequality(X1, -I_1x1, 0, 2)); - // y > = 0 - 
expectedqp.inequalities.push_back(LinearInequality(X2, -I_1x1, 0, 3)); - return std::make_pair(expectedqp, exampleqp); -} -; + expected.cost.push_back(HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1, + -1.5 * kOne, 10.0 * I_1x1, 2.0 * kOne, + 8.0)); + + expected.inequalities.add(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0); // 2x + y >= 2 + expected.inequalities.add(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1); // -x + 2y <= 6 + expected.inequalities.add(X1, I_1x1, 20, 4); // x<= 20 + expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0 + expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0 + return {expected, exampleqp}; +}; TEST(QPSolver, ParserSyntaticTest) { - auto expectedActual = testParser(QPSParser("QPExample.QPS")); - CHECK(assert_equal(expectedActual.first.cost, expectedActual.second.cost, + auto result = testParser(QPSParser("QPExample.QPS")); + CHECK(assert_equal(result.first.cost, result.second.cost, 1e-7)); + CHECK(assert_equal(result.first.inequalities, result.second.inequalities, 1e-7)); - CHECK(assert_equal(expectedActual.first.inequalities, - expectedActual.second.inequalities, 1e-7)); - CHECK(assert_equal(expectedActual.first.equalities, - expectedActual.second.equalities, 1e-7)); + CHECK(assert_equal(result.first.equalities, result.second.equalities, 1e-7)); } TEST(QPSolver, ParserSemanticTest) { - auto expected_actual = testParser(QPSParser("QPExample.QPS")); - VectorValues actualSolution, expectedSolution; - boost::tie(expectedSolution, boost::tuples::ignore) = - QPSolver(expected_actual.first).optimize(); - boost::tie(actualSolution, boost::tuples::ignore) = - QPSolver(expected_actual.second).optimize(); - CHECK(assert_equal(actualSolution, expectedSolution, 1e-7)); + auto result = testParser(QPSParser("QPExample.QPS")); + VectorValues expected = QPSolver(result.first).optimize().first; + VectorValues actual = QPSolver(result.second).optimize().first; + CHECK(assert_equal(actual, expected, 1e-7)); } -TEST(QPSolver, QPExampleTest){ +TEST(QPSolver, 
QPExampleTest) { QP problem = QPSParser("QPExample.QPS").Parse(); - VectorValues actualSolution; auto solver = QPSolver(problem); - boost::tie(actualSolution, boost::tuples::ignore) = solver.optimize(); - VectorValues expectedSolution; - expectedSolution.insert(Symbol('X',1),0.7625*I_1x1); - expectedSolution.insert(Symbol('X',2),0.4750*I_1x1); - double error_expected = problem.cost.error(expectedSolution); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(expectedSolution, actualSolution, 1e-7)) + VectorValues actual = solver.optimize().first; + VectorValues expected; + expected.insert(Symbol('X', 1), 0.7625 * I_1x1); + expected.insert(Symbol('X', 2), 0.4750 * I_1x1); + double error_expected = problem.cost.error(expected); + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(expected, actual, 1e-7)) CHECK(assert_equal(error_expected, error_actual)) } TEST(QPSolver, HS21) { QP problem = QPSParser("HS21.QPS").Parse(); - VectorValues actualSolution; - VectorValues expectedSolution; - expectedSolution.insert(Symbol('X',1), 2.0*I_1x1); - expectedSolution.insert(Symbol('X',2), 0.0*I_1x1); - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); + VectorValues expected; + expected.insert(Symbol('X', 1), 2.0 * I_1x1); + expected.insert(Symbol('X', 2), 0.0 * I_1x1); + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); CHECK(assert_equal(-99.9599999, error_actual, 1e-7)) - CHECK(assert_equal(expectedSolution, actualSolution)) + CHECK(assert_equal(expected, actual)) } TEST(QPSolver, HS35) { QP problem = QPSParser("HS35.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(1.11111111e-01,error_actual, 1e-7)) + VectorValues actual 
= QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(1.11111111e-01, error_actual, 1e-7)) } TEST(QPSolver, HS35MOD) { QP problem = QPSParser("HS35MOD.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(2.50000001e-01,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(2.50000001e-01, error_actual, 1e-7)) } TEST(QPSolver, HS51) { QP problem = QPSParser("HS51.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(8.88178420e-16,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(8.88178420e-16, error_actual, 1e-7)) } TEST(QPSolver, HS52) { QP problem = QPSParser("HS52.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(5.32664756,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(5.32664756, error_actual, 1e-7)) } -TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of tolerance than the rest +TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of + // tolerance than the rest QP problem = QPSParser("HS268.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - 
CHECK(assert_equal(5.73107049e-07,error_actual, 1e-6)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(5.73107049e-07, error_actual, 1e-6)) } TEST(QPSolver, QPTEST) { // REQUIRES Jacobian Fix QP problem = QPSParser("QPTEST.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(0.437187500e01,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(0.437187500e01, error_actual, 1e-7)) } /* ************************************************************************* */ -// Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html +// Create Matlab's test graph as in +// http://www.mathworks.com/help/optim/ug/quadprog.html QP createTestMatlabQPEx() { QP qp; // Objective functions 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 -6*x2 // Note the Hessian encodes: - // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f + // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + + // 0.5*f // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0 - qp.cost.push_back( - HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1, 2.0 * I_1x1, - 6 * I_1x1, 1000.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1, + 2.0 * I_1x1, 6 * I_1x1, 1000.0)); // Inequality constraints - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 - qp.inequalities.push_back( - LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1)); //-x1 + 2*x2 <=2 - qp.inequalities.push_back( - LinearInequality(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2)); // 2*x1 + x2 <=3 - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 3)); // -x1 <= 0 - 
qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 4)); // -x2 <= 0 + qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, 0); // x1 + x2 <= 2 + qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1); //-x1 + 2*x2 <=2 + qp.inequalities.add(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2); // 2*x1 + x2 <=3 + qp.inequalities.add(X(1), -I_1x1, 0, 3); // -x1 <= 0 + qp.inequalities.add(X(2), -I_1x1, 0, 4); // -x2 <= 0 return qp; } -///* ************************************************************************* */ +///* ************************************************************************* +///*/ TEST(QPSolver, optimizeMatlabEx) { QP qp = createTestMatlabQPEx(); QPSolver solver(qp); VectorValues initialValues; initialValues.insert(X(1), Z_1x1); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); - expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); + expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } -///* ************************************************************************* */ +///* ************************************************************************* +///*/ TEST(QPSolver, optimizeMatlabExNoinitials) { QP qp = createTestMatlabQPEx(); QPSolver solver(qp); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); - expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = 
solver.optimize().first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); + expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } /* ************************************************************************* */ @@ -387,18 +364,15 @@ TEST(QPSolver, optimizeMatlabExNoinitials) { QP createTestNocedal06bookEx16_4() { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_1x1, I_1x1)); - qp.cost.push_back(JacobianFactor(X(2), I_1x1, 2.5 * I_1x1)); + qp.cost.add(X(1), I_1x1, I_1x1); + qp.cost.add(X(2), I_1x1, 2.5 * I_1x1); // Inequality constraints - qp.inequalities.push_back( - LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0)); - qp.inequalities.push_back( - LinearInequality(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1)); - qp.inequalities.push_back( - LinearInequality(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2)); - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0.0, 3)); - qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0.0, 4)); + qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0); + qp.inequalities.add(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1); + qp.inequalities.add(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2); + qp.inequalities.add(X(1), -I_1x1, 0.0, 3); + qp.inequalities.add(X(2), -I_1x1, 0.0, 4); return qp; } @@ -410,21 +384,19 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) { initialValues.insert(X(1), (Vector(1) << 2.0).finished()); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 1.4).finished()); - expectedSolution.insert(X(2), (Vector(1) << 1.7).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 1.4).finished()); + expected.insert(X(2), (Vector(1) << 
1.7).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } /* ************************************************************************* */ TEST(QPSolver, failedSubproblem) { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_2x2, Z_2x1)); + qp.cost.add(X(1), I_2x2, Z_2x1); qp.cost.push_back(HessianFactor(X(1), Z_2x2, Z_2x1, 100.0)); - qp.inequalities.push_back( - LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0)); + qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0); VectorValues expected; expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); @@ -433,8 +405,7 @@ TEST(QPSolver, failedSubproblem) { initialValues.insert(X(1), (Vector(2) << 10.0, 100.0).finished()); QPSolver solver(qp); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); + VectorValues solution = solver.optimize(initialValues).first; CHECK(assert_equal(expected, solution, 1e-7)); } @@ -442,10 +413,9 @@ TEST(QPSolver, failedSubproblem) { /* ************************************************************************* */ TEST(QPSolver, infeasibleInitial) { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_2x2, Vector::Zero(2))); + qp.cost.add(X(1), I_2x2, Vector::Zero(2)); qp.cost.push_back(HessianFactor(X(1), Z_2x2, Vector::Zero(2), 100.0)); - qp.inequalities.push_back( - LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0)); + qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0); VectorValues expected; expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); @@ -464,4 +434,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - From dc1057f31405401d27abc2a040b041286795a395 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 30 Sep 2020 15:41:43 -0400 Subject: [PATCH 11/38] Fixed spelling mistake --- gtsam_unstable/linear/tests/testQPSolver.cpp | 22 
++++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index d3497d2a3..285f19b3f 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -157,21 +157,21 @@ TEST(QPSolver, iterate) { currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(2), Z_1x1); - std::vector expecteds(4), expectedDuals(4); - expecteds[0].insert(X(1), kZero); - expecteds[0].insert(X(2), kZero); + std::vector expected(4), expectedDuals(4); + expected[0].insert(X(1), kZero); + expected[0].insert(X(2), kZero); expectedDuals[0].insert(1, (Vector(1) << 3).finished()); expectedDuals[0].insert(2, kZero); - expecteds[1].insert(X(1), (Vector(1) << 1.5).finished()); - expecteds[1].insert(X(2), kZero); + expected[1].insert(X(1), (Vector(1) << 1.5).finished()); + expected[1].insert(X(2), kZero); expectedDuals[1].insert(3, (Vector(1) << 1.5).finished()); - expecteds[2].insert(X(1), (Vector(1) << 1.5).finished()); - expecteds[2].insert(X(2), (Vector(1) << 0.75).finished()); + expected[2].insert(X(1), (Vector(1) << 1.5).finished()); + expected[2].insert(X(2), (Vector(1) << 0.75).finished()); - expecteds[3].insert(X(1), (Vector(1) << 1.5).finished()); - expecteds[3].insert(X(2), (Vector(1) << 0.5).finished()); + expected[3].insert(X(1), (Vector(1) << 1.5).finished()); + expected[3].insert(X(2), (Vector(1) << 0.5).finished()); auto workingSet = solver.identifyActiveConstraints(qp.inequalities, currentSolution); @@ -186,12 +186,12 @@ TEST(QPSolver, iterate) { // Forst10book do not follow exactly what we implemented from Nocedal06book. // Specifically, we do not re-identify active constraints and // do not recompute dual variables after every step!!! 
- // CHECK(assert_equal(expecteds[it], state.values, 1e-10)); + // CHECK(assert_equal(expected[it], state.values, 1e-10)); // CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); it++; } - CHECK(assert_equal(expecteds[3], state.values, 1e-10)); + CHECK(assert_equal(expected[3], state.values, 1e-10)); } /* ************************************************************************* */ From 634682738e085d1a3c7d4ab2efdf9716971e1327 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Wed, 30 Sep 2020 23:25:20 -0700 Subject: [PATCH 12/38] renaming variables --- .../examples/TranslationAveragingExample.py | 116 +++++++++--------- 1 file changed, 60 insertions(+), 56 deletions(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 0f1314645..a374dc630 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -22,99 +22,103 @@ import numpy as np import gtsam from gtsam.examples import SFMdata +# Hyperparameters for 1dsfm, values used from Kyle Wilson's code. +MAX_1DSFM_PROJECTION_DIRECTIONS = 48 +OUTLIER_WEIGHT_THRESHOLD = 0.1 + def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: - """"Returns data from SfMData.createPoses(). This contains global rotations and unit translations directions.""" + """"Returns global rotations and unit translation directions between 8 cameras + that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata + and the unit translations directions between some camera pairs are computed from their + global translations. """ # Using toy dataset in SfMdata for example. - poses = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) - # Rotations of the cameras in the world frame - wRc. 
- rotations = gtsam.Values() - # Normalized translation directions for pairs of cameras - from first camera to second, - # in the coordinate frame of the first camera. - translation_directions = [] - for i in range(0, len(poses) - 2): + wTc = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) + # Rotations of the cameras in the world frame. + wRc_values = gtsam.Values() + # Normalized translation directions from camera i to camera j + # in the coordinate frame of camera i. + i_iZj_list = [] + for i in range(0, len(wTc) - 2): # Add the rotation. - rotations.insert(i, poses[i].rotation()) + wRc_values.insert(i, wTc[i].rotation()) # Create unit translation measurements with next two poses. for j in range(i + 1, i + 3): - i_Z_j = gtsam.Unit3(poses[i].rotation().unrotate( - poses[j].translation() - poses[i].translation())) - translation_directions.append(gtsam.BinaryMeasurementUnit3( - i, j, i_Z_j, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) + i_iZj = gtsam.Unit3(wTc[i].rotation().unrotate( + wTc[j].translation() - wTc[i].translation())) + i_iZj_list.append(gtsam.BinaryMeasurementUnit3( + i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) # Add the last two rotations. - rotations.insert(len(poses) - 1, poses[-1].rotation()) - rotations.insert(len(poses) - 2, poses[-2].rotation()) - return (rotations, translation_directions) + wRc_values.insert(len(wTc) - 1, wTc[-1].rotation()) + wRc_values.insert(len(wTc) - 2, wTc[-2].rotation()) + return (wRc_values, i_iZj_list) -def estimate_poses(relative_translations: gtsam.BinaryMeasurementsUnit3, - rotations: gtsam.Values) -> gtsam.Values: - """Estimate poses given rotations normalized translation directions between cameras. +def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, + wRc_values: gtsam.Values) -> gtsam.Values: + """Estimate poses given rotations and normalized translation directions between cameras. 
Args: - relative_translations -- List of normalized translation directions between camera pairs, each direction - is from the first camera to the second, in the frame of the first camera. - rotations -- Rotations of the cameras in the world frame. + iZj_list -- List of normalized translation direction measurements between camera pairs, + Z here refers to measurements. The measurements are of camera j with reference + to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit + vector to j in i's frame and is not a transformation. + wRc_values -- Rotations of the cameras in the world frame. Returns: Values -- Estimated poses. """ - # Some hyperparameters, values used from 1dsfm. - max_1dsfm_projection_directions = 48 - outlier_weight_threshold = 0.1 + # Convert the translation direction measurements to world frame using the rotations. + w_iZj_list = gtsam.BinaryMeasurementsUnit3() + for i_iZj in i_iZj_list: + w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1()) + .rotate(i_iZj.measured().point3())) + w_iZj_list.append(gtsam.BinaryMeasurementUnit3( + i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel())) - # Convert the translation directions to global frame using the rotations. - w_relative_translations = gtsam.BinaryMeasurementsUnit3() - for relative_translation in relative_translations: - w_relative_translation = gtsam.Unit3(rotations.atRot3(relative_translation.key1()) - .rotate(relative_translation.measured().point3())) - w_relative_translations.append(gtsam.BinaryMeasurementUnit3(relative_translation.key1(), - relative_translation.key2(), - w_relative_translation, - relative_translation.noiseModel())) - - # Indices of measurements that are to be used as projection directions. These are randomly chosen. - sampled_indices = np.random.choice(len(w_relative_translations), min( - max_1dsfm_projection_directions, len(w_relative_translations)), replace=False) + # Indices of measurements that are to be used as projection directions. 
+ # These are randomly chosen. + sampled_indices = np.random.choice(len(w_iZj_list), min( + MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list)), replace=False) # Sample projection directions from the measurements. - projection_directions = [ - w_relative_translations[idx].measured() for idx in sampled_indices] + projection_directions = [w_iZj_list[idx].measured() + for idx in sampled_indices] outlier_weights = [] # Find the outlier weights for each direction using MFAS. for direction in projection_directions: - algorithm = gtsam.MFAS(w_relative_translations, direction) + algorithm = gtsam.MFAS(w_iZj_list, direction) outlier_weights.append(algorithm.computeOutlierWeights()) - # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys (camera IDs) to a weight, - # where weights are proportional to the probability of the edge being an outlier. + # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys + # (camera IDs) to a weight, where weights are proportional to the probability of the edge + # being an outlier. avg_outlier_weights = defaultdict(float) for outlier_weight_dict in outlier_weights: for keypair, weight in outlier_weight_dict.items(): avg_outlier_weights[keypair] += weight / len(outlier_weights) # Remove w_relative_tranlsations that have weight greater than threshold, these are outliers. - inlier_w_relative_translations = gtsam.BinaryMeasurementsUnit3() - [inlier_w_relative_translations.append(Z) for Z in w_relative_translations - if avg_outlier_weights[(Z.key1(), Z.key2())] < outlier_weight_threshold] + w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() + [w_iZj_inliers.append(Z) for Z in w_iZj_list + if avg_outlier_weights[(Z.key1(), Z.key2())] < OUTLIER_WEIGHT_THRESHOLD] # Run the optimizer to obtain translations for normalized directions. 
- w_translations = gtsam.TranslationRecovery( - inlier_w_relative_translations).run() + wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() - poses = gtsam.Values() - for key in rotations.keys(): - poses.insert(key, gtsam.Pose3( - rotations.atRot3(key), w_translations.atPoint3(key))) - return poses + wTc_values = gtsam.Values() + for key in wRc_values.keys(): + wTc_values.insert(key, gtsam.Pose3( + wRc_values.atRot3(key), wtc_values.atPoint3(key))) + return wTc_values def main(): - rotations, translation_directions = get_data() - poses = estimate_poses(translation_directions, rotations) + wRc_values, w_iZj_list = get_data() + wTc_values = estimate_poses(w_iZj_list, wRc_values) print("**** Translation averaging output ****") - print(poses) + print(wTc_values) print("**************************************") From b9174ae0f4e4a6835151da8d1751beb2db5174ae Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 1 Oct 2020 11:24:26 -0400 Subject: [PATCH 13/38] Changed name to avoid template confusion in VC 2016 --- gtsam/geometry/Point3.cpp | 12 +++++------- gtsam/geometry/Point3.h | 4 ++-- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp index ce4ceee89..8665f7716 100644 --- a/gtsam/geometry/Point3.cpp +++ b/gtsam/geometry/Point3.cpp @@ -75,17 +75,15 @@ double dot(const Point3 &p, const Point3 &q, OptionalJacobian<1, 3> H1, return p.x() * q.x() + p.y() * q.y() + p.z() * q.z(); } -Point3Pair mean(const std::vector &abPointPairs) { +Point3Pair means(const std::vector &abPointPairs) { const size_t n = abPointPairs.size(); - Point3 aCentroid(0, 0, 0), bCentroid(0, 0, 0); + Point3 aSum(0, 0, 0), bSum(0, 0, 0); for (const Point3Pair &abPair : abPointPairs) { - aCentroid += abPair.first; - bCentroid += abPair.second; + aSum += abPair.first; + bSum += abPair.second; } const double f = 1.0 / n; - aCentroid *= f; - bCentroid *= f; - return make_pair(aCentroid, bCentroid); + return {aSum * f, bSum * f}; } 
/* ************************************************************************* */ diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 7f58497e9..33b5836f8 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -67,8 +67,8 @@ GTSAM_EXPORT Point3 mean(const CONTAINER& points) { return sum / points.size(); } -/// mean of Point3 pair -GTSAM_EXPORT Point3Pair mean(const std::vector& abPointPairs); +/// mean of Point3 pair +GTSAM_EXPORT Point3Pair means(const std::vector &abPointPairs); template struct Range; From eb4f5288e9b31dd749d8173dd287eac2f1a94a71 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 1 Oct 2020 12:01:33 -0400 Subject: [PATCH 14/38] Clean up code and tests and use "means" --- gtsam/geometry/Pose3.cpp | 65 ++++++++++++------------- gtsam/geometry/tests/testPoint3.cpp | 16 +++--- gtsam_unstable/geometry/Similarity3.cpp | 49 +++++++++++-------- 3 files changed, 69 insertions(+), 61 deletions(-) diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index ea822b796..22849d4f5 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -24,10 +24,11 @@ #include #include -using namespace std; - namespace gtsam { +using std::vector; +using Point3Pairs = vector; + /** instantiate concept checks */ GTSAM_CONCEPT_POSE_INST(Pose3); @@ -212,18 +213,20 @@ Matrix3 Pose3::ComputeQforExpmapDerivative(const Vector6& xi, double nearZeroThr #else // The closed-form formula in Barfoot14tro eq. 
(102) double phi = w.norm(); - if (std::abs(phi)>nearZeroThreshold) { - const double sinPhi = sin(phi), cosPhi = cos(phi); - const double phi2 = phi * phi, phi3 = phi2 * phi, phi4 = phi3 * phi, phi5 = phi4 * phi; + const Matrix3 WVW = W * V * W; + if (std::abs(phi) > nearZeroThreshold) { + const double s = sin(phi), c = cos(phi); + const double phi2 = phi * phi, phi3 = phi2 * phi, phi4 = phi3 * phi, + phi5 = phi4 * phi; // Invert the sign of odd-order terms to have the right Jacobian - Q = -0.5*V + (phi-sinPhi)/phi3*(W*V + V*W - W*V*W) - + (1-phi2/2-cosPhi)/phi4*(W*W*V + V*W*W - 3*W*V*W) - - 0.5*((1-phi2/2-cosPhi)/phi4 - 3*(phi-sinPhi-phi3/6.)/phi5)*(W*V*W*W + W*W*V*W); - } - else { - Q = -0.5*V + 1./6.*(W*V + V*W - W*V*W) - - 1./24.*(W*W*V + V*W*W - 3*W*V*W) - + 1./120.*(W*V*W*W + W*W*V*W); + Q = -0.5 * V + (phi - s) / phi3 * (W * V + V * W - WVW) + + (1 - phi2 / 2 - c) / phi4 * (W * W * V + V * W * W - 3 * WVW) - + 0.5 * ((1 - phi2 / 2 - c) / phi4 - 3 * (phi - s - phi3 / 6.) / phi5) * + (WVW * W + W * WVW); + } else { + Q = -0.5 * V + 1. / 6. * (W * V + V * W - WVW) - + 1. / 24. * (W * W * V + V * W * W - 3 * WVW) + + 1. / 120. 
* (WVW * W + W * WVW); } #endif @@ -381,39 +384,33 @@ Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself, } /* ************************************************************************* */ -boost::optional Pose3::Align(const std::vector& abPointPairs) { +boost::optional Pose3::Align(const Point3Pairs &abPointPairs) { const size_t n = abPointPairs.size(); - if (n < 3) - return boost::none; // we need at least three pairs + if (n < 3) { + return boost::none; // we need at least three pairs + } // calculate centroids - Point3 aCentroid(0,0,0), bCentroid(0,0,0); - for(const Point3Pair& abPair: abPointPairs) { - aCentroid += abPair.first; - bCentroid += abPair.second; - } - double f = 1.0 / n; - aCentroid *= f; - bCentroid *= f; + const auto centroids = means(abPointPairs); // Add to form H matrix Matrix3 H = Z_3x3; - for(const Point3Pair& abPair: abPointPairs) { - Point3 da = abPair.first - aCentroid; - Point3 db = abPair.second - bCentroid; + for (const Point3Pair &abPair : abPointPairs) { + const Point3 da = abPair.first - centroids.first; + const Point3 db = abPair.second - centroids.second; H += da * db.transpose(); - } + } // ClosestTo finds rotation matrix closest to H in Frobenius sense - Rot3 aRb = Rot3::ClosestTo(H); - Point3 aTb = Point3(aCentroid) - aRb * Point3(bCentroid); + const Rot3 aRb = Rot3::ClosestTo(H); + const Point3 aTb = centroids.first - aRb * centroids.second; return Pose3(aRb, aTb); } -boost::optional align(const vector& baPointPairs) { - vector abPointPairs; - for (const Point3Pair& baPair: baPointPairs) { - abPointPairs.push_back(make_pair(baPair.second, baPair.first)); +boost::optional align(const Point3Pairs &baPointPairs) { + Point3Pairs abPointPairs; + for (const Point3Pair &baPair : baPointPairs) { + abPointPairs.emplace_back(baPair.second, baPair.first); } return Pose3::Align(abPointPairs); } diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index a481a8072..a655011a0 100644 --- 
a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -166,22 +166,22 @@ TEST (Point3, normalize) { //************************************************************************* TEST(Point3, mean) { - Point3 expected_a_mean(2, 2, 2); + Point3 expected(2, 2, 2); Point3 a1(0, 0, 0), a2(1, 2, 3), a3(5, 4, 3); std::vector a_points{a1, a2, a3}; - Point3 actual_a_mean = mean(a_points); - EXPECT(assert_equal(expected_a_mean, actual_a_mean)); + Point3 actual = mean(a_points); + EXPECT(assert_equal(expected, actual)); } TEST(Point3, mean_pair) { Point3 a_mean(2, 2, 2), b_mean(-1, 1, 0); - Point3Pair expected_mean = std::make_pair(a_mean, b_mean); + Point3Pair expected = std::make_pair(a_mean, b_mean); Point3 a1(0, 0, 0), a2(1, 2, 3), a3(5, 4, 3); Point3 b1(-1, 0, 0), b2(-2, 4, 0), b3(0, -1, 0); - std::vector point_pairs{{a1,b1},{a2,b2},{a3,b3}}; - Point3Pair actual_mean = mean(point_pairs); - EXPECT(assert_equal(expected_mean.first, actual_mean.first)); - EXPECT(assert_equal(expected_mean.second, actual_mean.second)); + std::vector point_pairs{{a1, b1}, {a2, b2}, {a3, b3}}; + Point3Pair actual = means(point_pairs); + EXPECT(assert_equal(expected.first, actual.first)); + EXPECT(assert_equal(expected.second, actual.second)); } //************************************************************************* diff --git a/gtsam_unstable/geometry/Similarity3.cpp b/gtsam_unstable/geometry/Similarity3.cpp index b2d7dc080..819c51fee 100644 --- a/gtsam_unstable/geometry/Similarity3.cpp +++ b/gtsam_unstable/geometry/Similarity3.cpp @@ -23,10 +23,14 @@ namespace gtsam { +using std::vector; +using PointPairs = vector; + namespace { /// Subtract centroids from point pairs. 
-static std::vector subtractCentroids(const std::vector& abPointPairs, const Point3Pair& centroids) { - std::vector d_abPointPairs; +static PointPairs subtractCentroids(const PointPairs &abPointPairs, + const Point3Pair ¢roids) { + PointPairs d_abPointPairs; for (const Point3Pair& abPair : abPointPairs) { Point3 da = abPair.first - centroids.first; Point3 db = abPair.second - centroids.second; @@ -36,7 +40,8 @@ static std::vector subtractCentroids(const std::vector& } /// Form inner products x and y and calculate scale. -static const double calculateScale(const std::vector& d_abPointPairs, const Rot3& aRb) { +static const double calculateScale(const PointPairs &d_abPointPairs, + const Rot3 &aRb) { double x = 0, y = 0; Point3 da, db; for (const Point3Pair& d_abPair : d_abPointPairs) { @@ -50,7 +55,7 @@ static const double calculateScale(const std::vector& d_abPointPairs } /// Form outer product H. -static Matrix3 calculateH(const std::vector& d_abPointPairs) { +static Matrix3 calculateH(const PointPairs &d_abPointPairs) { Matrix3 H = Z_3x3; for (const Point3Pair& d_abPair : d_abPointPairs) { H += d_abPair.first * d_abPair.second.transpose(); @@ -59,7 +64,8 @@ static Matrix3 calculateH(const std::vector& d_abPointPairs) { } /// This method estimates the similarity transform from differences point pairs, given a known or estimated rotation and point centroids. -static Similarity3 align(const std::vector& d_abPointPairs, const Rot3& aRb, const Point3Pair& centroids) { +static Similarity3 align(const PointPairs &d_abPointPairs, const Rot3 &aRb, + const Point3Pair ¢roids) { const double s = calculateScale(d_abPointPairs, aRb); const Point3 aTb = (centroids.first - s * (aRb * centroids.second)) / s; return Similarity3(aRb, aTb, s); @@ -67,8 +73,9 @@ static Similarity3 align(const std::vector& d_abPointPairs, const Ro /// This method estimates the similarity transform from point pairs, given a known or estimated rotation. 
// Refer to: http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf Chapter 3 -static Similarity3 alignGivenR(const std::vector& abPointPairs, const Rot3& aRb) { - auto centroids = mean(abPointPairs); +static Similarity3 alignGivenR(const PointPairs &abPointPairs, + const Rot3 &aRb) { + auto centroids = means(abPointPairs); auto d_abPointPairs = subtractCentroids(abPointPairs, centroids); return align(d_abPointPairs, aRb, centroids); } @@ -147,10 +154,12 @@ Point3 Similarity3::operator*(const Point3& p) const { return transformFrom(p); } -Similarity3 Similarity3::Align(const std::vector& abPointPairs) { - // Refer to: http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf Chapter 3 - if (abPointPairs.size() < 3) throw std::runtime_error("input should have at least 3 pairs of points"); - auto centroids = mean(abPointPairs); +Similarity3 Similarity3::Align(const PointPairs &abPointPairs) { + // Refer to Chapter 3 of + // http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf + if (abPointPairs.size() < 3) + throw std::runtime_error("input should have at least 3 pairs of points"); + auto centroids = means(abPointPairs); auto d_abPointPairs = subtractCentroids(abPointPairs, centroids); Matrix3 H = calculateH(d_abPointPairs); // ClosestTo finds rotation matrix closest to H in Frobenius sense @@ -158,17 +167,18 @@ Similarity3 Similarity3::Align(const std::vector& abPointPairs) { return align(d_abPointPairs, aRb, centroids); } -Similarity3 Similarity3::Align(const std::vector& abPosePairs) { +Similarity3 Similarity3::Align(const vector &abPosePairs) { const size_t n = abPosePairs.size(); - if (n < 2) throw std::runtime_error("input should have at least 2 pairs of poses"); + if (n < 2) + throw std::runtime_error("input should have at least 2 pairs of poses"); // calculate rotation vector rotations; - vector abPointPairs; + PointPairs abPointPairs; rotations.reserve(n); 
abPointPairs.reserve(n); Pose3 wTa, wTb; - for (const Pose3Pair& abPair : abPosePairs) { + for (const Pose3Pair &abPair : abPosePairs) { std::tie(wTa, wTb) = abPair; rotations.emplace_back(wTa.rotation().compose(wTb.rotation().inverse())); abPointPairs.emplace_back(wTa.translation(), wTb.translation()); @@ -178,7 +188,7 @@ Similarity3 Similarity3::Align(const std::vector& abPosePairs) { return alignGivenR(abPointPairs, aRb); } -Matrix4 Similarity3::wedge(const Vector7& xi) { +Matrix4 Similarity3::wedge(const Vector7 &xi) { // http://www.ethaneade.org/latex2html/lie/node29.html const auto w = xi.head<3>(); const auto u = xi.segment<3>(3); @@ -217,12 +227,13 @@ Matrix3 Similarity3::GetV(Vector3 w, double lambda) { W = 1.0 / 24.0 - theta2 / 720.0; } const double lambda2 = lambda * lambda, lambda3 = lambda2 * lambda; + const double expMinLambda = exp(-lambda); double A, alpha = 0.0, beta, mu; if (lambda2 > 1e-9) { - A = (1.0 - exp(-lambda)) / lambda; + A = (1.0 - expMinLambda) / lambda; alpha = 1.0 / (1.0 + theta2 / lambda2); - beta = (exp(-lambda) - 1 + lambda) / lambda2; - mu = (1 - lambda + (0.5 * lambda2) - exp(-lambda)) / lambda3; + beta = (expMinLambda - 1 + lambda) / lambda2; + mu = (1 - lambda + (0.5 * lambda2) - expMinLambda) / lambda3; } else { A = 1.0 - lambda / 2.0 + lambda2 / 6.0; beta = 0.5 - lambda / 6.0 + lambda2 / 24.0 - lambda3 / 120.0; From b30448733c66ec8194fa49e376834b1f4eb711d3 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 1 Oct 2020 19:56:35 -0400 Subject: [PATCH 15/38] remove all Cython references --- .github/scripts/python.sh | 56 +++------------- .github/workflows/build-python.yml | 2 - .github/workflows/trigger-python.yml | 6 +- .gitignore | 6 -- INSTALL.md | 4 +- cmake/CMakeLists.txt | 1 - cmake/FindCython.cmake | 81 ----------------------- docker/ubuntu-gtsam-python/Dockerfile | 8 +-- docker/ubuntu-gtsam/Dockerfile | 1 - gtsam/gtsam.i | 8 +-- gtsam/navigation/AHRSFactor.h | 2 +- gtsam/navigation/CombinedImuFactor.h | 2 +- 
gtsam/navigation/ImuFactor.h | 2 +- gtsam/nonlinear/Marginals.h | 4 +- gtsam_extra.cmake.in | 5 -- python/gtsam/tests/test_JacobianFactor.py | 2 +- 16 files changed, 27 insertions(+), 163 deletions(-) delete mode 100644 cmake/FindCython.cmake diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh index 6948cc385..a71e14c97 100644 --- a/.github/scripts/python.sh +++ b/.github/scripts/python.sh @@ -43,11 +43,6 @@ if [ -z ${PYTHON_VERSION+x} ]; then exit 127 fi -if [ -z ${WRAPPER+x} ]; then - echo "Please provide the wrapper to build!" - exit 126 -fi - PYTHON="python${PYTHON_VERSION}" if [[ $(uname) == "Darwin" ]]; then @@ -61,25 +56,11 @@ PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb -case $WRAPPER in -"cython") - BUILD_CYTHON="ON" - BUILD_PYBIND="OFF" - TYPEDEF_POINTS_TO_VECTORS="OFF" - sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/cython/requirements.txt - ;; -"pybind") - BUILD_CYTHON="OFF" - BUILD_PYBIND="ON" - TYPEDEF_POINTS_TO_VECTORS="ON" +BUILD_PYBIND="ON" +TYPEDEF_POINTS_TO_VECTORS="ON" - sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt - ;; -*) - exit 126 - ;; -esac +sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt mkdir $GITHUB_WORKSPACE/build cd $GITHUB_WORKSPACE/build @@ -90,7 +71,6 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=${BUILD_CYTHON} \ -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ @@ -98,30 +78,10 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install -make -j$(nproc) install & +make -j$(nproc) install -while ps -p $! 
> /dev/null -do - sleep 60 - now=$(date +%s) - printf "%d seconds have elapsed\n" $(( (now - start) )) -done -case $WRAPPER in -"cython") - cd $GITHUB_WORKSPACE/build/cython - $PYTHON setup.py install --user --prefix= - cd $GITHUB_WORKSPACE/build/cython/gtsam/tests - $PYTHON -m unittest discover - ;; -"pybind") - cd $GITHUB_WORKSPACE/build/python - $PYTHON setup.py install --user --prefix= - cd $GITHUB_WORKSPACE/python/gtsam/tests - $PYTHON -m unittest discover - ;; -*) - echo "THIS SHOULD NEVER HAPPEN!" - exit 125 - ;; -esac \ No newline at end of file +cd $GITHUB_WORKSPACE/build/python +$PYTHON setup.py install --user --prefix= +cd $GITHUB_WORKSPACE/python/gtsam/tests +$PYTHON -m unittest discover diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index dc03ec6c9..b8d6bc311 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -12,7 +12,6 @@ jobs: CTEST_PARALLEL_LEVEL: 2 CMAKE_BUILD_TYPE: ${{ matrix.build_type }} PYTHON_VERSION: ${{ matrix.python_version }} - WRAPPER: ${{ matrix.wrapper }} strategy: fail-fast: false matrix: @@ -28,7 +27,6 @@ jobs: build_type: [Debug, Release] python_version: [3] - wrapper: [pybind] include: - name: ubuntu-18.04-gcc-5 os: ubuntu-18.04 diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml index 94527e732..1e8981d99 100644 --- a/.github/workflows/trigger-python.yml +++ b/.github/workflows/trigger-python.yml @@ -1,11 +1,11 @@ -# This triggers Cython builds on `gtsam-manylinux-build` +# This triggers Python builds on `gtsam-manylinux-build` name: Trigger Python Builds on: push: branches: - develop jobs: - triggerCython: + triggerPython: runs-on: ubuntu-latest steps: - name: Repository Dispatch @@ -13,5 +13,5 @@ jobs: with: token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }} repository: borglab/gtsam-manylinux-build - event-type: cython-wrapper + event-type: python-wrapper client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ 
github.sha }}"}' diff --git a/.gitignore b/.gitignore index c2d6ce60f..cde059767 100644 --- a/.gitignore +++ b/.gitignore @@ -9,12 +9,6 @@ *.txt.user *.txt.user.6d59f0c *.pydevproject -cython/venv -cython/gtsam.cpp -cython/gtsam.cpython-35m-darwin.so -cython/gtsam.pyx -cython/gtsam.so -cython/gtsam_wrapper.pxd .vscode .env /.vs/ diff --git a/INSTALL.md b/INSTALL.md index cf66766a1..3dbc3a850 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -173,7 +173,7 @@ NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against g Intel has a guide for installing MKL on Linux through APT repositories at . After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM): -`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python. +`LD_PRELOAD` need only be set if you are building the python wrapper to use GTSAM from python. ```sh source /opt/intel/mkl/bin/mklvars.sh intel64 export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so" @@ -190,6 +190,6 @@ Failing to specify `LD_PRELOAD` may lead to errors such as: `ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv` or `Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.` -when importing GTSAM using the cython wrapper in python. +when importing GTSAM using the python wrapper. 
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index 9d9ecd48b..451ca38a4 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -19,7 +19,6 @@ install(FILES GtsamMatlabWrap.cmake GtsamTesting.cmake GtsamPrinting.cmake - FindCython.cmake FindNumPy.cmake README.html DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools") diff --git a/cmake/FindCython.cmake b/cmake/FindCython.cmake deleted file mode 100644 index e5a32c30d..000000000 --- a/cmake/FindCython.cmake +++ /dev/null @@ -1,81 +0,0 @@ -# Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake -# -# Find the Cython compiler. -# -# This code sets the following variables: -# -# CYTHON_FOUND -# CYTHON_PATH -# CYTHON_EXECUTABLE -# CYTHON_VERSION -# -# See also UseCython.cmake - -#============================================================================= -# Copyright 2011 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Use the Cython executable that lives next to the Python executable -# if it is a local installation. 
-if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp) -else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) -endif() - -if ( PYTHONINTERP_FOUND ) - execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import Cython; print(Cython.__path__[0])" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE CYTHON_PATH - OUTPUT_STRIP_TRAILING_WHITESPACE - ) -endif () - -# RESULT=0 means ok -if ( NOT RESULT ) - get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH ) - find_program( CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - HINTS ${_python_path} - ) -endif () - -# RESULT=0 means ok -if ( NOT RESULT ) - execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import Cython; print(Cython.__version__)" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE CYTHON_VAR_OUTPUT - ERROR_VARIABLE CYTHON_VAR_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if ( RESULT EQUAL 0 ) - string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" - CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" ) - endif () -endif () - -include( FindPackageHandleStandardArgs ) -find_package_handle_standard_args( Cython - FOUND_VAR - CYTHON_FOUND - REQUIRED_VARS - CYTHON_PATH - CYTHON_EXECUTABLE - VERSION_VAR - CYTHON_VERSION - ) - diff --git a/docker/ubuntu-gtsam-python/Dockerfile b/docker/ubuntu-gtsam-python/Dockerfile index c733ceb19..ce5d8fdca 100644 --- a/docker/ubuntu-gtsam-python/Dockerfile +++ b/docker/ubuntu-gtsam-python/Dockerfile @@ -7,9 +7,9 @@ FROM dellaert/ubuntu-gtsam:bionic RUN apt-get install -y python3-pip python3-dev # Install python wrapper requirements -RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt +RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt -# Run cmake again, now with cython toolbox on +# Run cmake again, now with python toolbox on WORKDIR /usr/src/gtsam/build RUN cmake \ -DCMAKE_BUILD_TYPE=Release \ @@ -17,7 +17,7 @@ RUN cmake \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ 
-DGTSAM_BUILD_TESTS=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ + -DGTSAM_BUILD_PYTHON=ON \ -DGTSAM_PYTHON_VERSION=3\ .. @@ -25,7 +25,7 @@ RUN cmake \ RUN make -j4 install && make clean # Needed to run python wrapper: -RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc +RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc # Run bash CMD ["bash"] diff --git a/docker/ubuntu-gtsam/Dockerfile b/docker/ubuntu-gtsam/Dockerfile index 187c76314..f2b476f15 100644 --- a/docker/ubuntu-gtsam/Dockerfile +++ b/docker/ubuntu-gtsam/Dockerfile @@ -23,7 +23,6 @@ RUN cmake \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ -DGTSAM_BUILD_TESTS=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \ .. # Build diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i index a172df315..e4652f741 100644 --- a/gtsam/gtsam.i +++ b/gtsam/gtsam.i @@ -93,9 +93,9 @@ * - Add "void serializable()" to a class if you only want the class to be serialized as a * part of a container (such as noisemodel). This version does not require a publicly * accessible default constructor. - * Forward declarations and class definitions for Cython: - * - Need to specify the base class (both this forward class and base class are declared in an external cython header) - * This is so Cython can generate proper inheritance. + * Forward declarations and class definitions for Pybind: + * - Need to specify the base class (both this forward class and base class are declared in an external Pybind header) + * This is so Pybind can generate proper inheritance. 
* Example when wrapping a gtsam-based project: * // forward declarations * virtual class gtsam::NonlinearFactor @@ -104,7 +104,7 @@ * #include * virtual class MyFactor : gtsam::NoiseModelFactor {...}; * - *DO NOT* re-define overriden function already declared in the external (forward-declared) base class - * - This will cause an ambiguity problem in Cython pxd header file + * - This will cause an ambiguity problem in Pybind header file */ /** diff --git a/gtsam/navigation/AHRSFactor.h b/gtsam/navigation/AHRSFactor.h index ec1a07f65..34626fcf6 100644 --- a/gtsam/navigation/AHRSFactor.h +++ b/gtsam/navigation/AHRSFactor.h @@ -42,7 +42,7 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation public: - /// Default constructor, only for serialization and Cython wrapper + /// Default constructor, only for serialization and wrappers PreintegratedAhrsMeasurements() {} /** diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 387353136..efca25bff 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -145,7 +145,7 @@ public: /// @name Constructors /// @{ - /// Default constructor only for serialization and Cython wrapper + /// Default constructor only for serialization and wrappers PreintegratedCombinedMeasurements() { preintMeasCov_.setZero(); } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 51df3f24a..cd9c591f1 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -80,7 +80,7 @@ protected: public: - /// Default constructor for serialization and Cython wrapper + /// Default constructor for serialization and wrappers PreintegratedImuMeasurements() { preintMeasCov_.setZero(); } diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index 4e201cc38..9935bafdd 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -48,7 +48,7 @@ protected: public: - /// Default constructor 
only for Cython wrapper + /// Default constructor only for wrappers Marginals(){} /** Construct a marginals class from a nonlinear factor graph. @@ -156,7 +156,7 @@ protected: FastMap indices_; public: - /// Default constructor only for Cython wrapper + /// Default constructor only for wrappers JointMarginal() {} /** Access a block, corresponding to a pair of variables, of the joint diff --git a/gtsam_extra.cmake.in b/gtsam_extra.cmake.in index 01ac00b37..44ba36bd6 100644 --- a/gtsam_extra.cmake.in +++ b/gtsam_extra.cmake.in @@ -7,8 +7,3 @@ set (GTSAM_VERSION_STRING "@GTSAM_VERSION_STRING@") set (GTSAM_USE_TBB @GTSAM_USE_TBB@) set (GTSAM_DEFAULT_ALLOCATOR @GTSAM_DEFAULT_ALLOCATOR@) - -if("@GTSAM_INSTALL_CYTHON_TOOLBOX@") - list(APPEND GTSAM_CYTHON_INSTALL_PATH "@GTSAM_CYTHON_INSTALL_PATH@") - list(APPEND GTSAM_EIGENCY_INSTALL_PATH "@GTSAM_EIGENCY_INSTALL_PATH@") -endif() diff --git a/python/gtsam/tests/test_JacobianFactor.py b/python/gtsam/tests/test_JacobianFactor.py index 6e049ed47..79f512f60 100644 --- a/python/gtsam/tests/test_JacobianFactor.py +++ b/python/gtsam/tests/test_JacobianFactor.py @@ -19,7 +19,7 @@ from gtsam.utils.test_case import GtsamTestCase class TestJacobianFactor(GtsamTestCase): def test_eliminate(self): - # Recommended way to specify a matrix (see cython/README) + # Recommended way to specify a matrix (see python/README) Ax2 = np.array( [[-5., 0.], [+0., -5.], From cef937e09d8505068ad503ab7a6248a194190536 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 1 Oct 2020 20:21:14 -0400 Subject: [PATCH 16/38] Update Point3.h Fix doxygen comment --- gtsam/geometry/Point3.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index b425af8a4..57188fc5e 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -68,7 +68,7 @@ GTSAM_EXPORT Point3 mean(const CONTAINER& points) { return sum / points.size(); } -/// mean of Point3 pair +/// Calculate the two means of a set of 
Point3 pairs GTSAM_EXPORT Point3Pair means(const std::vector &abPointPairs); template From a490017669578711140d48f5aaa22b77ce7efc9d Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Thu, 1 Oct 2020 22:19:17 -0700 Subject: [PATCH 17/38] outlier rejection in separate fn and other readability changes --- .../examples/TranslationAveragingExample.py | 104 ++++++++++-------- 1 file changed, 61 insertions(+), 43 deletions(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index a374dc630..d843f8702 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -15,7 +15,7 @@ Date: September 2020 """ from collections import defaultdict -from typing import Tuple, List +from typing import List, Tuple import numpy as np @@ -28,59 +28,48 @@ OUTLIER_WEIGHT_THRESHOLD = 0.1 def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: - """"Returns global rotations and unit translation directions between 8 cameras - that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata - and the unit translations directions between some camera pairs are computed from their + """"Returns global rotations and unit translation directions between 8 cameras + that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata + and the unit translations directions between some camera pairs are computed from their global translations. """ # Using toy dataset in SfMdata for example. - wTc = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) + wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) # Rotations of the cameras in the world frame. wRc_values = gtsam.Values() # Normalized translation directions from camera i to camera j # in the coordinate frame of camera i. 
i_iZj_list = [] - for i in range(0, len(wTc) - 2): + for i in range(0, len(wTc_list) - 2): # Add the rotation. - wRc_values.insert(i, wTc[i].rotation()) + wRi = wTc_list[i].rotation() + wRc_values.insert(i, wRi) # Create unit translation measurements with next two poses. for j in range(i + 1, i + 3): - i_iZj = gtsam.Unit3(wTc[i].rotation().unrotate( - wTc[j].translation() - wTc[i].translation())) + # Compute the translation from pose i to pose j, in the world reference frame. + w_itj = wTc_list[j].translation() - wTc_list[i].translation() + # Obtain the translation in the camera i's reference frame. + i_itj = wRi.unrotate(w_itj) + # Compute the normalized unit translation direction. + i_iZj = gtsam.Unit3(i_itj) i_iZj_list.append(gtsam.BinaryMeasurementUnit3( i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) # Add the last two rotations. - wRc_values.insert(len(wTc) - 1, wTc[-1].rotation()) - wRc_values.insert(len(wTc) - 2, wTc[-2].rotation()) - return (wRc_values, i_iZj_list) + wRc_values.insert(len(wTc_list) - 1, wTc_list[-1].rotation()) + wRc_values.insert(len(wTc_list) - 2, wTc_list[-2].rotation()) + return wRc_values, i_iZj_list -def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, - wRc_values: gtsam.Values) -> gtsam.Values: - """Estimate poses given rotations and normalized translation directions between cameras. +def prune_to_inliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3: + """Removes outliers from a list of Unit3 measurements that are the + translation directions from camera i to camera j in the world frame.""" - Args: - iZj_list -- List of normalized translation direction measurements between camera pairs, - Z here refers to measurements. The measurements are of camera j with reference - to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit - vector to j in i's frame and is not a transformation. - wRc_values -- Rotations of the cameras in the world frame. 
+ # Indices of measurements that are to be used as projection directions. + # These are randomly chosen. All sampled directions must be unique. + num_directions_to_sample = min( + MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list)) + sampled_indices = np.random.choice( + len(w_iZj_list), num_directions_to_sample, replace=False) - Returns: - Values -- Estimated poses. - """ - - # Convert the translation direction measurements to world frame using the rotations. - w_iZj_list = gtsam.BinaryMeasurementsUnit3() - for i_iZj in i_iZj_list: - w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1()) - .rotate(i_iZj.measured().point3())) - w_iZj_list.append(gtsam.BinaryMeasurementUnit3( - i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel())) - - # Indices of measurements that are to be used as projection directions. - # These are randomly chosen. - sampled_indices = np.random.choice(len(w_iZj_list), min( - MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list)), replace=False) # Sample projection directions from the measurements. projection_directions = [w_iZj_list[idx].measured() for idx in sampled_indices] @@ -91,8 +80,8 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, algorithm = gtsam.MFAS(w_iZj_list, direction) outlier_weights.append(algorithm.computeOutlierWeights()) - # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys - # (camera IDs) to a weight, where weights are proportional to the probability of the edge + # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys + # (camera IDs) to a weight, where weights are proportional to the probability of the edge # being an outlier. avg_outlier_weights = defaultdict(float) for outlier_weight_dict in outlier_weights: @@ -101,8 +90,37 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, # Remove w_relative_tranlsations that have weight greater than threshold, these are outliers. 
w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() - [w_iZj_inliers.append(Z) for Z in w_iZj_list - if avg_outlier_weights[(Z.key1(), Z.key2())] < OUTLIER_WEIGHT_THRESHOLD] + [w_iZj_inliers.append(Z) for Z in w_iZj_list if avg_outlier_weights[( + Z.key1(), Z.key2())] < OUTLIER_WEIGHT_THRESHOLD] + + return w_iZj_inliers + + +def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, + wRc_values: gtsam.Values) -> gtsam.Values: + """Estimate poses given rotations and normalized translation directions between cameras. + + Args: + i_iZj_list: List of normalized translation direction measurements between camera pairs, + Z here refers to measurements. The measurements are of camera j with reference + to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit + vector to j in i's frame and is not a transformation. + wRc_values: Rotations of the cameras in the world frame. + + Returns: + Values: Estimated poses. + """ + + # Convert the translation direction measurements to world frame using the rotations. + w_iZj_list = gtsam.BinaryMeasurementsUnit3() + for i_iZj in i_iZj_list: + w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1()) + .rotate(i_iZj.measured().point3())) + w_iZj_list.append(gtsam.BinaryMeasurementUnit3( + i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel())) + + # Remove the outliers in the unit translation directions. + w_iZj_inliers = prune_to_inliers(w_iZj_list) # Run the optimizer to obtain translations for normalized directions. 
wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() @@ -115,8 +133,8 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, def main(): - wRc_values, w_iZj_list = get_data() - wTc_values = estimate_poses(w_iZj_list, wRc_values) + wRc_values, i_iZj_list = get_data() + wTc_values = estimate_poses(i_iZj_list, wRc_values) print("**** Translation averaging output ****") print(wTc_values) print("**************************************") From 695f75bc8d37cd820108a399c7f992472b4d0b20 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Fri, 2 Oct 2020 07:56:41 -0700 Subject: [PATCH 18/38] readability changes --- python/gtsam/examples/TranslationAveragingExample.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index d843f8702..683008749 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -32,8 +32,8 @@ def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata and the unit translations directions between some camera pairs are computed from their global translations. """ - # Using toy dataset in SfMdata for example. - wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)) + fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0 + wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(fx, fy, s, u0, v0)) # Rotations of the cameras in the world frame. 
wRc_values = gtsam.Values() # Normalized translation directions from camera i to camera j @@ -88,10 +88,10 @@ def prune_to_inliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryM for keypair, weight in outlier_weight_dict.items(): avg_outlier_weights[keypair] += weight / len(outlier_weights) - # Remove w_relative_tranlsations that have weight greater than threshold, these are outliers. + # Remove w_iZj that have weight greater than threshold, these are outliers. w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() - [w_iZj_inliers.append(Z) for Z in w_iZj_list if avg_outlier_weights[( - Z.key1(), Z.key2())] < OUTLIER_WEIGHT_THRESHOLD] + [w_iZj_inliers.append(Z) for w_iZj in w_iZj_list if avg_outlier_weights[( + w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD] return w_iZj_inliers @@ -108,7 +108,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, wRc_values: Rotations of the cameras in the world frame. Returns: - Values: Estimated poses. + gtsam.Values: Estimated poses. """ # Convert the translation direction measurements to world frame using the rotations. From f11ce11678b8cac8341d9fb3f4dda4dd7931e084 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Fri, 2 Oct 2020 08:03:28 -0700 Subject: [PATCH 19/38] fixing one variable that was not renamed --- python/gtsam/examples/TranslationAveragingExample.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 683008749..7e8c96b15 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -90,7 +90,7 @@ def prune_to_inliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryM # Remove w_iZj that have weight greater than threshold, these are outliers. 
w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() - [w_iZj_inliers.append(Z) for w_iZj in w_iZj_list if avg_outlier_weights[( + [w_iZj_inliers.append(w_iZj) for w_iZj in w_iZj_list if avg_outlier_weights[( w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD] return w_iZj_inliers From eb9ca8cd92608200ebf72908decf81a298fc1af8 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 2 Oct 2020 14:05:13 -0400 Subject: [PATCH 20/38] find python if using Default --- CMakeLists.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index eedc42c9e..8b546ebc2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -118,15 +118,15 @@ if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) endif() if(GTSAM_BUILD_PYTHON) - # Get info about the Python3 interpreter - # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 - find_package(Python3 COMPONENTS Interpreter Development) - - if(NOT ${Python3_FOUND}) - message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.") - endif() - if(${GTSAM_PYTHON_VERSION} STREQUAL "Default") + # Get info about the Python3 interpreter + # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 + find_package(Python3 COMPONENTS Interpreter Development) + + if(NOT ${Python3_FOUND}) + message(FATAL_ERROR "Cannot find Python3 interpreter. 
Please install Python >= 3.6.") + endif() + set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}" CACHE STRING From a8ea6f2bd26f68ed940138be840fa5bbd24bb985 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Fri, 2 Oct 2020 16:12:10 -0400 Subject: [PATCH 21/38] Fixed include error --- gtsam_unstable/linear/tests/testLPSolver.cpp | 12 +++++++----- gtsam_unstable/linear/tests/testQPSolver.cpp | 8 +++++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/gtsam_unstable/linear/tests/testLPSolver.cpp b/gtsam_unstable/linear/tests/testLPSolver.cpp index de9cd032a..53c8c7618 100644 --- a/gtsam_unstable/linear/tests/testLPSolver.cpp +++ b/gtsam_unstable/linear/tests/testLPSolver.cpp @@ -16,9 +16,9 @@ * @author Duy-Nguyen Ta */ -#include -#include -#include +#include +#include + #include #include #include @@ -28,8 +28,10 @@ #include #include -#include -#include +#include + +#include +#include using namespace std; using namespace gtsam; diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index 285f19b3f..67a0c971e 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -17,12 +17,14 @@ * @author Ivan Dario Jimenez */ -#include -#include -#include #include #include +#include +#include + +#include + using namespace std; using namespace gtsam; using namespace gtsam::symbol_shorthand; From 03ca9053421dcb51c63065a03a5cc99081e8a529 Mon Sep 17 00:00:00 2001 From: akrishnan86 Date: Fri, 2 Oct 2020 23:44:55 -0700 Subject: [PATCH 22/38] removing shared ptr, iostream, renaming --- gtsam/sfm/MFAS.cpp | 2 -- gtsam/sfm/tests/testMFAS.cpp | 2 -- python/gtsam/examples/TranslationAveragingExample.py | 4 ++-- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/gtsam/sfm/MFAS.cpp b/gtsam/sfm/MFAS.cpp index bc66d0711..4cd983ecd 100644 --- a/gtsam/sfm/MFAS.cpp +++ b/gtsam/sfm/MFAS.cpp @@ -7,8 +7,6 @@ #include -#include - #include #include 
#include diff --git a/gtsam/sfm/tests/testMFAS.cpp b/gtsam/sfm/tests/testMFAS.cpp index b2daf0d2e..362027d5d 100644 --- a/gtsam/sfm/tests/testMFAS.cpp +++ b/gtsam/sfm/tests/testMFAS.cpp @@ -9,8 +9,6 @@ #include -#include - using namespace std; using namespace gtsam; diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 7e8c96b15..054b61126 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -59,7 +59,7 @@ def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: return wRc_values, i_iZj_list -def prune_to_inliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3: +def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3: """Removes outliers from a list of Unit3 measurements that are the translation directions from camera i to camera j in the world frame.""" @@ -120,7 +120,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel())) # Remove the outliers in the unit translation directions. - w_iZj_inliers = prune_to_inliers(w_iZj_list) + w_iZj_inliers = filter_outliers(w_iZj_list) # Run the optimizer to obtain translations for normalized directions. 
wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() From 06ac62724927abc947aec6e87017f8b2bd5df39a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 4 Oct 2020 19:55:08 -0400 Subject: [PATCH 23/38] added normalize function to orthogonalize the rotation after composition --- gtsam/geometry/Rot3.h | 7 +++++++ gtsam/geometry/Rot3M.cpp | 28 +++++++++++++++++++++++++++- gtsam/geometry/Rot3Q.cpp | 6 +++++- gtsam/geometry/tests/testRot3.cpp | 20 ++++++++++++++++++++ 4 files changed, 59 insertions(+), 2 deletions(-) diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index de9d2b420..db5367c8f 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -430,6 +430,13 @@ namespace gtsam { */ Matrix3 transpose() const; + /** + * Normalize rotation so that its determinant is 1. + * This means either re-orthogonalizing the Matrix representation or + * normalizing the quaternion representation. + */ + Rot3 normalize(const Rot3& R) const; + /// @deprecated, this is base 1, and was just confusing Point3 column(int index) const; diff --git a/gtsam/geometry/Rot3M.cpp b/gtsam/geometry/Rot3M.cpp index 500941a16..ffc468dfc 100644 --- a/gtsam/geometry/Rot3M.cpp +++ b/gtsam/geometry/Rot3M.cpp @@ -108,9 +108,35 @@ Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx, ); } +/* ************************************************************************* */ +Rot3 Rot3::normalize(const Rot3& R) const { + /// Implementation from here: https://stackoverflow.com/a/23082112/1236990 + /// Theory: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view + + /// Essentially, this computes the orthogonalization error, distributes the + /// error to the x and y rows, and then performs a Taylor expansion to + /// orthogonalize. 
+ + Matrix3 rot = R.matrix(), rot_new; + + if (std::fabs(rot.determinant()-1) < 1e-12) return R; + + Vector3 x = rot.block<1, 3>(0, 0), y = rot.block<1, 3>(1, 0); + double error = x.dot(y); + + Vector3 x_ort = x - (error / 2) * y, y_ort = y - (error / 2) * x; + Vector3 z_ort = x_ort.cross(y_ort); + + rot_new.block<1, 3>(0, 0) = 0.5 * (3 - x_ort.dot(x_ort)) * x_ort; + rot_new.block<1, 3>(1, 0) = 0.5 * (3 - y_ort.dot(y_ort)) * y_ort; + rot_new.block<1, 3>(2, 0) = 0.5 * (3 - z_ort.dot(z_ort)) * z_ort; + + return Rot3(rot_new); +} + /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { - return Rot3(rot_*R2.rot_); + return normalize(Rot3(rot_*R2.rot_)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/Rot3Q.cpp b/gtsam/geometry/Rot3Q.cpp index 6e1871c64..d4400b0dc 100644 --- a/gtsam/geometry/Rot3Q.cpp +++ b/gtsam/geometry/Rot3Q.cpp @@ -86,9 +86,13 @@ namespace gtsam { gtsam::Quaternion(Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX()))); } + /* ************************************************************************* */ + Rot3 Rot3::normalize(const Rot3& R) const { + return Rot3(R.quaternion_.normalized()); + } /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { - return Rot3(quaternion_ * R2.quaternion_); + return normalize(Rot3(quaternion_ * R2.quaternion_)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index a7c6f5a77..e86029026 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -910,6 +910,26 @@ TEST(Rot3, yaw_derivative) { CHECK(assert_equal(num, calc)); } +/* ************************************************************************* */ +TEST(Rot3, determinant) { + size_t degree = 1; + Rot3 R_w0; // Zero 
rotation + Rot3 R_w1 = Rot3::Ry(degree * M_PI / 180); + + Rot3 R_01, R_w2; + double actual, expected = 1.0; + + for (size_t i = 2; i < 360; ++i) { + R_01 = R_w0.between(R_w1); + R_w2 = R_w1 * R_01; + R_w0 = R_w1; + R_w1 = R_w2; + actual = R_w2.matrix().determinant(); + + EXPECT_DOUBLES_EQUAL(expected, actual, 1e-7); + } +} + /* ************************************************************************* */ int main() { TestResult tr; From 2e1cc3ca3517eca5431a07405453f776b28c9aeb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Oct 2020 13:25:30 -0400 Subject: [PATCH 24/38] normalized needs to be called explicitly --- gtsam/geometry/Rot3.h | 2 +- gtsam/geometry/Rot3M.cpp | 18 ++++++++++-------- gtsam/geometry/Rot3Q.cpp | 6 +++--- gtsam/geometry/tests/testRot3.cpp | 2 +- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index db5367c8f..b1e46308d 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -435,7 +435,7 @@ namespace gtsam { * This means either re-orthogonalizing the Matrix representation or * normalizing the quaternion representation. 
*/ - Rot3 normalize(const Rot3& R) const; + Rot3 normalized() const; /// @deprecated, this is base 1, and was just confusing Point3 column(int index) const; diff --git a/gtsam/geometry/Rot3M.cpp b/gtsam/geometry/Rot3M.cpp index ffc468dfc..c372d403b 100644 --- a/gtsam/geometry/Rot3M.cpp +++ b/gtsam/geometry/Rot3M.cpp @@ -109,7 +109,7 @@ Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx, } /* ************************************************************************* */ -Rot3 Rot3::normalize(const Rot3& R) const { +Rot3 Rot3::normalized() const { /// Implementation from here: https://stackoverflow.com/a/23082112/1236990 /// Theory: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view @@ -117,9 +117,11 @@ Rot3 Rot3::normalize(const Rot3& R) const { /// error to the x and y rows, and then performs a Taylor expansion to /// orthogonalize. - Matrix3 rot = R.matrix(), rot_new; + Matrix3 rot = rot_.matrix(), rot_orth; - if (std::fabs(rot.determinant()-1) < 1e-12) return R; + // Check if determinant is already 1. + // If yes, then return the current Rot3. 
+ if (std::fabs(rot.determinant()-1) < 1e-12) return Rot3(rot_); Vector3 x = rot.block<1, 3>(0, 0), y = rot.block<1, 3>(1, 0); double error = x.dot(y); @@ -127,16 +129,16 @@ Rot3 Rot3::normalize(const Rot3& R) const { Vector3 x_ort = x - (error / 2) * y, y_ort = y - (error / 2) * x; Vector3 z_ort = x_ort.cross(y_ort); - rot_new.block<1, 3>(0, 0) = 0.5 * (3 - x_ort.dot(x_ort)) * x_ort; - rot_new.block<1, 3>(1, 0) = 0.5 * (3 - y_ort.dot(y_ort)) * y_ort; - rot_new.block<1, 3>(2, 0) = 0.5 * (3 - z_ort.dot(z_ort)) * z_ort; + rot_orth.block<1, 3>(0, 0) = 0.5 * (3 - x_ort.dot(x_ort)) * x_ort; + rot_orth.block<1, 3>(1, 0) = 0.5 * (3 - y_ort.dot(y_ort)) * y_ort; + rot_orth.block<1, 3>(2, 0) = 0.5 * (3 - z_ort.dot(z_ort)) * z_ort; - return Rot3(rot_new); + return Rot3(rot_orth); } /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { - return normalize(Rot3(rot_*R2.rot_)); + return Rot3(rot_*R2.rot_); } /* ************************************************************************* */ diff --git a/gtsam/geometry/Rot3Q.cpp b/gtsam/geometry/Rot3Q.cpp index d4400b0dc..523255d87 100644 --- a/gtsam/geometry/Rot3Q.cpp +++ b/gtsam/geometry/Rot3Q.cpp @@ -87,12 +87,12 @@ namespace gtsam { } /* ************************************************************************* */ - Rot3 Rot3::normalize(const Rot3& R) const { - return Rot3(R.quaternion_.normalized()); + Rot3 Rot3::normalized() const { + return Rot3(quaternion_.normalized()); } /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { - return normalize(Rot3(quaternion_ * R2.quaternion_)); + return Rot3(quaternion_ * R2.quaternion_); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index e86029026..7b792f8bd 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ 
b/gtsam/geometry/tests/testRot3.cpp @@ -923,7 +923,7 @@ TEST(Rot3, determinant) { R_01 = R_w0.between(R_w1); R_w2 = R_w1 * R_01; R_w0 = R_w1; - R_w1 = R_w2; + R_w1 = R_w2.normalized(); actual = R_w2.matrix().determinant(); EXPECT_DOUBLES_EQUAL(expected, actual, 1e-7); From 5fb7229fa67fd14b671611dc5b8daf21f5179342 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Oct 2020 22:28:27 -0400 Subject: [PATCH 25/38] Moved normalize next to ClosestTo and add more docs --- gtsam/geometry/Rot3.h | 29 +++++++++++++++++++++-------- gtsam/geometry/Rot3M.cpp | 1 - 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index b1e46308d..2334312f6 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -262,9 +262,29 @@ namespace gtsam { static Rot3 AlignTwoPairs(const Unit3& a_p, const Unit3& b_p, // const Unit3& a_q, const Unit3& b_q); - /// Static, named constructor that finds Rot3 element closest to M in Frobenius norm. + /** + * Static, named constructor that finds Rot3 element closest to M in Frobenius norm. + * + * Uses Full SVD to compute the orthogonal matrix, thus is highly accurate and robust. + * + * N. J. Higham. Matrix nearness problems and applications. + * In M. J. C. Gover and S. Barnett, editors, Applications of Matrix Theory, pages 1–27. + * Oxford University Press, 1989. + */ static Rot3 ClosestTo(const Matrix3& M) { return Rot3(SO3::ClosestTo(M)); } + /** + * Normalize rotation so that its determinant is 1. + * This means either re-orthogonalizing the Matrix representation or + * normalizing the quaternion representation. + * + * This method is akin to `ClosestTo` but uses a computationally cheaper + * algorithm. + * + * Ref: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view + */ + Rot3 normalized() const; + /// @} /// @name Testable /// @{ @@ -430,13 +450,6 @@ namespace gtsam { */ Matrix3 transpose() const; - /** - * Normalize rotation so that its determinant is 1. 
- * This means either re-orthogonalizing the Matrix representation or - * normalizing the quaternion representation. - */ - Rot3 normalized() const; - /// @deprecated, this is base 1, and was just confusing Point3 column(int index) const; diff --git a/gtsam/geometry/Rot3M.cpp b/gtsam/geometry/Rot3M.cpp index c372d403b..02e5b771f 100644 --- a/gtsam/geometry/Rot3M.cpp +++ b/gtsam/geometry/Rot3M.cpp @@ -111,7 +111,6 @@ Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx, /* ************************************************************************* */ Rot3 Rot3::normalized() const { /// Implementation from here: https://stackoverflow.com/a/23082112/1236990 - /// Theory: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view /// Essentially, this computes the orthogonalization error, distributes the /// error to the x and y rows, and then performs a Taylor expansion to From e9e87526c41699a544fb07ae0beedca87658643e Mon Sep 17 00:00:00 2001 From: Jose Luis Blanco Claraco Date: Tue, 6 Oct 2020 18:10:06 +0200 Subject: [PATCH 26/38] refactor cmake scripts into smaller files --- CMakeLists.txt | 551 +------------------------ cmake/GtsamBuildTypes.cmake | 14 + cmake/handle_allocators.cmake | 34 ++ cmake/handle_boost.cmake | 56 +++ cmake/handle_ccache.cmake | 14 + cmake/handle_cpack.cmake | 28 ++ cmake/handle_eigen.cmake | 75 ++++ cmake/handle_final_checks.cmake | 10 + cmake/handle_general_options.cmake | 46 +++ cmake/handle_global_build_flags.cmake | 52 +++ cmake/handle_mkl.cmake | 17 + cmake/handle_openmp.cmake | 11 + cmake/handle_perftools.cmake | 4 + cmake/handle_print_configuration.cmake | 104 +++++ cmake/handle_python.cmake | 26 ++ cmake/handle_tbb.cmake | 24 ++ cmake/handle_uninstall.cmake | 10 + matlab/CMakeLists.txt | 7 +- 18 files changed, 545 insertions(+), 538 deletions(-) create mode 100644 cmake/handle_allocators.cmake create mode 100644 cmake/handle_boost.cmake create mode 100644 cmake/handle_ccache.cmake create mode 100644 
cmake/handle_cpack.cmake create mode 100644 cmake/handle_eigen.cmake create mode 100644 cmake/handle_final_checks.cmake create mode 100644 cmake/handle_general_options.cmake create mode 100644 cmake/handle_global_build_flags.cmake create mode 100644 cmake/handle_mkl.cmake create mode 100644 cmake/handle_openmp.cmake create mode 100644 cmake/handle_perftools.cmake create mode 100644 cmake/handle_print_configuration.cmake create mode 100644 cmake/handle_python.cmake create mode 100644 cmake/handle_tbb.cmake create mode 100644 cmake/handle_uninstall.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 8b546ebc2..831ee00f3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,17 +22,10 @@ set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH}) ############################################################################### # Gather information, perform checks, set defaults -# Set the default install path to home -#set (CMAKE_INSTALL_PREFIX ${HOME} CACHE PATH "Install prefix for library") - set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(GtsamMakeConfigFile) include(GNUInstallDirs) -# Record the root dir for gtsam - needed during external builds, e.g., ROS -set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]") - # Load build type flags and default to Debug mode include(GtsamBuildTypes) @@ -45,399 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. 
") endif() -# See whether gtsam_unstable is available (it will be present only if we're using a git checkout) -if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") - set(GTSAM_UNSTABLE_AVAILABLE 1) -else() - set(GTSAM_UNSTABLE_AVAILABLE 0) -endif() +include(cmake/handle_boost.cmake) # Boost +include(cmake/handle_ccache.cmake) # ccache +include(cmake/handle_cpack.cmake) # CPack +include(cmake/handle_eigen.cmake) # Eigen3 +include(cmake/handle_general_options.cmake) # CMake build options +include(cmake/handle_mkl.cmake) # MKL +include(cmake/handle_openmp.cmake) # OpenMP +include(cmake/handle_perftools.cmake) # Google perftools +include(cmake/handle_python.cmake) # Python options and commands +include(cmake/handle_tbb.cmake) # TBB +include(cmake/handle_uninstall.cmake) # for "make uninstall" -# ---------------------------------------------------------------------------- -# Uninstall target, for "make uninstall" -# ---------------------------------------------------------------------------- -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" - IMMEDIATE @ONLY) +include(cmake/handle_allocators.cmake) # Must be after tbb, pertools -add_custom_target(uninstall - "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") - - -############################################################################### -# Set up options - -# Configurable Options -if(GTSAM_UNSTABLE_AVAILABLE) - option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) - option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) - option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) -endif() -option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) -option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for 
rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) -option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) -option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) -option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) -option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) -option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) -option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) -option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) -option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) -option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) -option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) -option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) -if(NOT MSVC AND NOT XCODE_VERSION) - option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - # Set the build type to upper case for downstream use - string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) - - # Set the GTSAM_BUILD_TAG variable. - # If build type is Release, set to blank (""), else set to the build type. 
- if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE") - set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory - else() - set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") - endif() -endif() - -# Options relating to MATLAB wrapper -# TODO: Check for matlab mex binary before handling building of binaries -option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.") - -# Check / set dependent variables for MATLAB wrapper -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES) - set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) -endif() - -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") -endif() - -if(GTSAM_BUILD_PYTHON) - if(${GTSAM_PYTHON_VERSION} STREQUAL "Default") - # Get info about the Python3 interpreter - # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 - find_package(Python3 COMPONENTS Interpreter Development) - - if(NOT ${Python3_FOUND}) - message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.") - endif() - - set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}" - CACHE - STRING - "The version of Python to build the wrappers against." 
- FORCE) - endif() - - if(GTSAM_UNSTABLE_BUILD_PYTHON) - if (NOT GTSAM_BUILD_UNSTABLE) - message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") - set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) - endif() - endif() - - set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") -endif() - -# Flags for choosing default packaging tools -set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") -set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") - -if (CMAKE_GENERATOR STREQUAL "Ninja" AND - ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR - (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) - # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. - # Rationale in https://github.com/ninja-build/ninja/issues/814 - add_compile_options(-fdiagnostics-color=always) -endif() - -############################################################################### -# Find boost - -# To change the path for boost, you will need to set: -# BOOST_ROOT: path to install prefix for boost -# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT - -if(MSVC) - # By default, boost only builds static libraries on windows - set(Boost_USE_STATIC_LIBS ON) # only find static libs - # If we ever reset above on windows and, ... - # If we use Boost shared libs, disable auto linking. - # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. 
- if(NOT Boost_USE_STATIC_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) - endif() - # Virtual memory range for PCH exceeded on VS2015 - if(MSVC_VERSION LESS 1910) # older than VS2017 - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) - endif() -endif() - -# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() -# or explicit instantiation will generate build errors. -# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 -# -if(MSVC AND BUILD_SHARED_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) -endif() - -# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. -set(BOOST_FIND_MINIMUM_VERSION 1.58) -set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) - -find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) - -# Required components -if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR - NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) - message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.") -endif() - -option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) -# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) -set(GTSAM_BOOST_LIBRARIES - Boost::serialization - Boost::system - Boost::filesystem - Boost::thread - Boost::date_time - Boost::regex -) -if (GTSAM_DISABLE_NEW_TIMERS) - message("WARNING: GTSAM timing instrumentation manually disabled") - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS) -else() - if(Boost_TIMER_LIBRARY) - list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono) - else() - list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the 
header-only boost timer library, need -lrt - message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") - endif() -endif() - -############################################################################### -# Find TBB -find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) - -# Set up variables if we're using TBB -if(TBB_FOUND AND GTSAM_WITH_TBB) - set(GTSAM_USE_TBB 1) # This will go into config.h - if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) - set(TBB_GREATER_EQUAL_2020 1) - else() - set(TBB_GREATER_EQUAL_2020 0) - endif() - # all definitions and link requisites will go via imported targets: - # tbb & tbbmalloc - list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) -else() - set(GTSAM_USE_TBB 0) # This will go into config.h -endif() - -############################################################################### -# Prohibit Timing build mode in combination with TBB -if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing")) - message(FATAL_ERROR "Timing build mode cannot be used together with TBB. 
Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") -endif() - - -############################################################################### -# Find Google perftools -find_package(GooglePerfTools) - -############################################################################### -# Support ccache, if installed -if(NOT MSVC AND NOT XCODE_VERSION) - find_program(CCACHE_FOUND ccache) - if(CCACHE_FOUND) - if(GTSAM_BUILD_WITH_CCACHE) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) - else() - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") - endif() - endif(CCACHE_FOUND) -endif() - -############################################################################### -# Find MKL -find_package(MKL) - -if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) - set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h - set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL - list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) - - # --no-as-needed is required with gcc according to the MKL link advisor - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") - endif() -else() - set(GTSAM_USE_EIGEN_MKL 0) - set(EIGEN_USE_MKL_ALL 0) -endif() - -############################################################################### -# Find OpenMP (if we're also using MKL) -find_package(OpenMP) # do this here to generate correct message if disabled - -if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) - if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) - set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h - list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) - endif() -endif() - - -############################################################################### -# Option for using system 
Eigen or GTSAM-bundled Eigen -### These patches only affect usage of MKL. If you want to enable MKL, you *must* -### use our patched version of Eigen -### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection) -### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code) -option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) -option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) - -# Switch for using system Eigen or GTSAM-bundled Eigen -if(GTSAM_USE_SYSTEM_EIGEN) - find_package(Eigen3 REQUIRED) - - # Use generic Eigen include paths e.g. - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") - - # check if MKL is also enabled - can have one or the other, but not both! - # Note: Eigen >= v3.2.5 includes our patches - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) - message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") - endif() - - # Check for Eigen version which doesn't work with MKL - # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) - message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") - endif() - - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") -else() - # Use bundled Eigen include path. 
- # Clear any variables set by FindEigen3 - if(EIGEN3_INCLUDE_DIR) - set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) - endif() - - # set full path to be used by external projects - # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") - - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") -endif() - -# Detect Eigen version: -set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") -if (EXISTS ${EIGEN_VER_H}) - file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) - - # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... - - string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") - - string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") - - string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") - - set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") - - message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") -else() - message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") -endif () - -if (MSVC) - if (BUILD_SHARED_LIBS) - # mute eigen static assert to avoid errors in shared lib - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) - endif() - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen -endif() - -if (APPLE AND BUILD_SHARED_LIBS) - # Set 
the default install directory on macOS - set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") -endif() - -############################################################################### -# Global compile options - -# Build list of possible allocators -set(possible_allocators "") -if(GTSAM_USE_TBB) - list(APPEND possible_allocators TBB) - set(preferred_allocator TBB) -else() - list(APPEND possible_allocators BoostPool STL) - set(preferred_allocator STL) -endif() -if(GOOGLE_PERFTOOLS_FOUND) - list(APPEND possible_allocators tcmalloc) -endif() - -# Check if current allocator choice is valid and set cache option -list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) -if(allocator_valid EQUAL -1) - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) -else() - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") -endif() -set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) -mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) - -# Define compile flags depending on allocator -if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") - set(GTSAM_ALLOCATOR_BOOSTPOOL 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") - set(GTSAM_ALLOCATOR_STL 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") - set(GTSAM_ALLOCATOR_TBB 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") - set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator - list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") -endif() - -if(MSVC) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code -endif() - -# GCC 4.8+ complains about local 
typedefs which we use for shared_ptr etc. -if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) - endif() -endif() - -# As of XCode 7, clang also complains about this -if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) - endif() -endif() - -if(GTSAM_ENABLE_CONSISTENCY_CHECKS) - # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) -endif() +include(cmake/handle_global_build_flags.cmake) # Build flags ############################################################################### # Add components @@ -477,7 +92,6 @@ endif() GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) - # Check for doxygen availability - optional dependency find_package(Doxygen) @@ -489,146 +103,11 @@ endif() # CMake Tools add_subdirectory(cmake) - -############################################################################### -# Set up CPack -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") -set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") -set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") -set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") -set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") -set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) -set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) -set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) -set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") -#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory -#set(CPACK_INSTALLED_DIRECTORIES 
".") # FIXME: throws error -set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") -set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") -set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") -set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") -#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs - -# Deb-package specific cpack -set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") -set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") - - -############################################################################### # Print configuration variables -message(STATUS "===============================================================") -message(STATUS "================ Configuration Options ======================") -print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") -print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") -print_config("CMake version" "${CMAKE_VERSION}") -print_config("CMake generator" "${CMAKE_GENERATOR}") -print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") -message(STATUS "Build flags ") -print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") -print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") -print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") -if (DOXYGEN_FOUND) - print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") -endif() -print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") -print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") -if(GTSAM_UNSTABLE_AVAILABLE) - print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") - print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ") - 
print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") - print_config("Build type" "${CMAKE_BUILD_TYPE}") - print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") - print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") -endif() - -print_build_options_for_target(gtsam) - -print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") - -if(GTSAM_USE_TBB) - print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})") -elseif(TBB_FOUND) - print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled") -else() - print_config("Use Intel TBB" "TBB not found") -endif() -if(GTSAM_USE_EIGEN_MKL) - print_config("Eigen will use MKL" "Yes") -elseif(MKL_FOUND) - print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") -else() - print_config("Eigen will use MKL" "MKL not found") -endif() -if(GTSAM_USE_EIGEN_MKL_OPENMP) - print_config("Eigen will use MKL and OpenMP" "Yes") -elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) - print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") -elseif(OPENMP_FOUND AND NOT MKL_FOUND) - print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") -elseif(OPENMP_FOUND) - print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") -else() - print_config("Eigen will use MKL and OpenMP" "OpenMP not found") -endif() -print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") - -if(GTSAM_THROW_CHEIRALITY_EXCEPTION) - print_config("Cheirality exceptions enabled" "YES") -else() - print_config("Cheirality exceptions enabled" "NO") -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - 
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) - print_config("Build with ccache" "Yes") - elseif(CCACHE_FOUND) - print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") - else() - print_config("Build with ccache" "No") - endif() -endif() - -message(STATUS "Packaging flags") -print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") -print_config("CPack Generator" "${CPACK_GENERATOR}") - -message(STATUS "GTSAM flags ") -print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") -print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") -print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") -print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") -print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") -print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") - -message(STATUS "MATLAB toolbox flags") -print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") -if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) - print_config("MATLAB root" "${MATLAB_ROOT}") - print_config("MEX binary" "${MEX_COMMAND}") -endif() -message(STATUS "Python toolbox flags ") -print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ") -if(GTSAM_BUILD_PYTHON) - print_config("Python version" ${GTSAM_PYTHON_VERSION}) -endif() - -message(STATUS "===============================================================") +include(cmake/handle_print_configuration.cmake) # Print warnings at the end -if(GTSAM_WITH_TBB AND NOT TBB_FOUND) - message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. 
Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") -endif() -if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) - message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") -endif() -if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) - message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") -endif() +include(cmake/handle_final_checks.cmake) # Include CPack *after* all flags include(CPack) diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index 53dacd3f5..840d37427 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -263,3 +263,17 @@ function(gtsam_apply_build_flags target_name_) target_compile_options(${target_name_} PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE}) endfunction(gtsam_apply_build_flags) + + +if(NOT MSVC AND NOT XCODE_VERSION) + # Set the build type to upper case for downstream use + string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) + + # Set the GTSAM_BUILD_TAG variable. + # If build type is Release, set to blank (""), else set to the build type. 
+ if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE") + set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory + else() + set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") + endif() +endif() diff --git a/cmake/handle_allocators.cmake b/cmake/handle_allocators.cmake new file mode 100644 index 000000000..63411b17b --- /dev/null +++ b/cmake/handle_allocators.cmake @@ -0,0 +1,34 @@ +# Build list of possible allocators +set(possible_allocators "") +if(GTSAM_USE_TBB) + list(APPEND possible_allocators TBB) + set(preferred_allocator TBB) +else() + list(APPEND possible_allocators BoostPool STL) + set(preferred_allocator STL) +endif() +if(GOOGLE_PERFTOOLS_FOUND) + list(APPEND possible_allocators tcmalloc) +endif() + +# Check if current allocator choice is valid and set cache option +list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) +if(allocator_valid EQUAL -1) + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) +else() + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") +endif() +set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) +mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) + +# Define compile flags depending on allocator +if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") + set(GTSAM_ALLOCATOR_BOOSTPOOL 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") + set(GTSAM_ALLOCATOR_STL 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") + set(GTSAM_ALLOCATOR_TBB 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") + set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator + list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") +endif() diff --git a/cmake/handle_boost.cmake b/cmake/handle_boost.cmake new file mode 100644 index 000000000..e73c2237d --- /dev/null +++ b/cmake/handle_boost.cmake @@ -0,0 +1,56 @@ 
+############################################################################### +# Find boost + +# To change the path for boost, you will need to set: +# BOOST_ROOT: path to install prefix for boost +# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT + +if(MSVC) + # By default, boost only builds static libraries on windows + set(Boost_USE_STATIC_LIBS ON) # only find static libs + # If we ever reset above on windows and, ... + # If we use Boost shared libs, disable auto linking. + # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. + if(NOT Boost_USE_STATIC_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) + endif() + # Virtual memory range for PCH exceeded on VS2015 + if(MSVC_VERSION LESS 1910) # older than VS2017 + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) + endif() +endif() + + +# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. 
+set(BOOST_FIND_MINIMUM_VERSION 1.58) +set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) + +find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) + +# Required components +if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR + NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) + message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.") +endif() + +option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) +# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) +set(GTSAM_BOOST_LIBRARIES + Boost::serialization + Boost::system + Boost::filesystem + Boost::thread + Boost::date_time + Boost::regex +) +if (GTSAM_DISABLE_NEW_TIMERS) + message("WARNING: GTSAM timing instrumentation manually disabled") + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS) +else() + if(Boost_TIMER_LIBRARY) + list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono) + else() + list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt + message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") + endif() +endif() diff --git a/cmake/handle_ccache.cmake b/cmake/handle_ccache.cmake new file mode 100644 index 000000000..9eabb1905 --- /dev/null +++ b/cmake/handle_ccache.cmake @@ -0,0 +1,14 @@ +############################################################################### +# Support ccache, if installed +if(NOT MSVC AND NOT XCODE_VERSION) + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + if(GTSAM_BUILD_WITH_CCACHE) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) + else() + 
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") + endif() + endif(CCACHE_FOUND) +endif() diff --git a/cmake/handle_cpack.cmake b/cmake/handle_cpack.cmake new file mode 100644 index 000000000..1c32433a4 --- /dev/null +++ b/cmake/handle_cpack.cmake @@ -0,0 +1,28 @@ +#JLBC: is all this actually used by someone? could it be removed? + +# Flags for choosing default packaging tools +set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") +set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") + +############################################################################### +# Set up CPack +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") +set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") +set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") +set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") +set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") +set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) +set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) +set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) +set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") +#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory +#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error +set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") +set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") +set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") +set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") +#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs + +# Deb-package specific cpack +set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") +set(CPACK_DEBIAN_PACKAGE_DEPENDS 
"libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") diff --git a/cmake/handle_eigen.cmake b/cmake/handle_eigen.cmake new file mode 100644 index 000000000..690da6971 --- /dev/null +++ b/cmake/handle_eigen.cmake @@ -0,0 +1,75 @@ +############################################################################### +# Option for using system Eigen or GTSAM-bundled Eigen +### These patches only affect usage of MKL. If you want to enable MKL, you *must* +### use our patched version of Eigen +### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection) +### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code) +option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) +option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) + +# Switch for using system Eigen or GTSAM-bundled Eigen +if(GTSAM_USE_SYSTEM_EIGEN) + find_package(Eigen3 REQUIRED) + + # Use generic Eigen include paths e.g. + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") + + # check if MKL is also enabled - can have one or the other, but not both! + # Note: Eigen >= v3.2.5 includes our patches + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) + message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") + endif() + + # Check for Eigen version which doesn't work with MKL + # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) + message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. 
Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") + endif() + + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") +else() + # Use bundled Eigen include path. + # Clear any variables set by FindEigen3 + if(EIGEN3_INCLUDE_DIR) + set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) + endif() + + # set full path to be used by external projects + # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") + + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") +endif() + +# Detect Eigen version: +set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") +if (EXISTS ${EIGEN_VER_H}) + file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) + + # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... 
+ + string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") + + string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") + + string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") + + set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") + + message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") +else() + message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") +endif () + +if (MSVC) + if (BUILD_SHARED_LIBS) + # mute eigen static assert to avoid errors in shared lib + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) + endif() + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen +endif() diff --git a/cmake/handle_final_checks.cmake b/cmake/handle_final_checks.cmake new file mode 100644 index 000000000..f91fc7fdb --- /dev/null +++ b/cmake/handle_final_checks.cmake @@ -0,0 +1,10 @@ +# Print warnings at the end +if(GTSAM_WITH_TBB AND NOT TBB_FOUND) + message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") +endif() +if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) + message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") +endif() +if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) + message(WARNING "Your compiler does not support OpenMP. 
Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") +endif() diff --git a/cmake/handle_general_options.cmake b/cmake/handle_general_options.cmake new file mode 100644 index 000000000..27d73bd86 --- /dev/null +++ b/cmake/handle_general_options.cmake @@ -0,0 +1,46 @@ +############################################################################### +# Set up options + +# See whether gtsam_unstable is available (it will be present only if we're using a git checkout) +if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") + set(GTSAM_UNSTABLE_AVAILABLE 1) +else() + set(GTSAM_UNSTABLE_AVAILABLE 0) +endif() + +# Configurable Options +if(GTSAM_UNSTABLE_AVAILABLE) + option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) +endif() +option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) +option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) +option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) +option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." 
ON) +option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) +option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) +option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) +option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) +option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) +option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) +option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) +option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) +option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) +if(NOT MSVC AND NOT XCODE_VERSION) + option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) +endif() + +# Options relating to MATLAB wrapper +# TODO: Check for matlab mex binary before handling building of binaries +option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) +set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.") + +# Check / set dependent variables for MATLAB wrapper +if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_TYPE_POSTFIXES) + set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) +endif() + +if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) + message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. 
If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") +endif() diff --git a/cmake/handle_global_build_flags.cmake b/cmake/handle_global_build_flags.cmake new file mode 100644 index 000000000..f33e12b94 --- /dev/null +++ b/cmake/handle_global_build_flags.cmake @@ -0,0 +1,52 @@ +# JLBC: These should ideally be ported to "modern cmake" via target properties. +# + +if (CMAKE_GENERATOR STREQUAL "Ninja" AND + ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR + (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) + # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. + # Rationale in https://github.com/ninja-build/ninja/issues/814 + add_compile_options(-fdiagnostics-color=always) +endif() + + +# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() +# or explicit instantiation will generate build errors. +# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 +# +if(MSVC AND BUILD_SHARED_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) +endif() + +if (APPLE AND BUILD_SHARED_LIBS) + # Set the default install directory on macOS + set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") +endif() + +############################################################################### +# Global compile options + +if(MSVC) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code +endif() + +# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. 
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) + endif() +endif() + +# As of XCode 7, clang also complains about this +if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) + endif() +endif() + +if(GTSAM_ENABLE_CONSISTENCY_CHECKS) + # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) +endif() diff --git a/cmake/handle_mkl.cmake b/cmake/handle_mkl.cmake new file mode 100644 index 000000000..5d7ec365b --- /dev/null +++ b/cmake/handle_mkl.cmake @@ -0,0 +1,17 @@ +############################################################################### +# Find MKL +find_package(MKL) + +if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) + set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h + set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL + list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) + + # --no-as-needed is required with gcc according to the MKL link advisor + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") + endif() +else() + set(GTSAM_USE_EIGEN_MKL 0) + set(EIGEN_USE_MKL_ALL 0) +endif() diff --git a/cmake/handle_openmp.cmake b/cmake/handle_openmp.cmake new file mode 100644 index 000000000..4f27aa633 --- /dev/null +++ b/cmake/handle_openmp.cmake @@ -0,0 +1,11 @@ + +############################################################################### +# Find OpenMP (if we're also using MKL) +find_package(OpenMP) # do this here to generate correct message if disabled + +if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) + if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND 
GTSAM_WITH_EIGEN_MKL_OPENMP) + set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) + endif() +endif() diff --git a/cmake/handle_perftools.cmake b/cmake/handle_perftools.cmake new file mode 100644 index 000000000..499caf80a --- /dev/null +++ b/cmake/handle_perftools.cmake @@ -0,0 +1,4 @@ + +############################################################################### +# Find Google perftools +find_package(GooglePerfTools) diff --git a/cmake/handle_print_configuration.cmake b/cmake/handle_print_configuration.cmake new file mode 100644 index 000000000..4ffd00e54 --- /dev/null +++ b/cmake/handle_print_configuration.cmake @@ -0,0 +1,104 @@ +############################################################################### +# Print configuration variables +message(STATUS "===============================================================") +message(STATUS "================ Configuration Options ======================") +print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") +print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") +print_config("CMake version" "${CMAKE_VERSION}") +print_config("CMake generator" "${CMAKE_GENERATOR}") +print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") +message(STATUS "Build flags ") +print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") +print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") +print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") +if (DOXYGEN_FOUND) + print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") +endif() +print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") +print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") +if(GTSAM_UNSTABLE_AVAILABLE) + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM 
unstable Python ") + print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") +endif() + +if(NOT MSVC AND NOT XCODE_VERSION) + print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") + print_config("Build type" "${CMAKE_BUILD_TYPE}") + print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") +endif() + +print_build_options_for_target(gtsam) + +print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") + +if(GTSAM_USE_TBB) + print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})") +elseif(TBB_FOUND) + print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled") +else() + print_config("Use Intel TBB" "TBB not found") +endif() +if(GTSAM_USE_EIGEN_MKL) + print_config("Eigen will use MKL" "Yes") +elseif(MKL_FOUND) + print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") +else() + print_config("Eigen will use MKL" "MKL not found") +endif() +if(GTSAM_USE_EIGEN_MKL_OPENMP) + print_config("Eigen will use MKL and OpenMP" "Yes") +elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") +elseif(OPENMP_FOUND AND NOT MKL_FOUND) + print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") +elseif(OPENMP_FOUND) + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") +else() + print_config("Eigen will use MKL and OpenMP" "OpenMP not found") +endif() +print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") + +if(GTSAM_THROW_CHEIRALITY_EXCEPTION) + print_config("Cheirality exceptions enabled" "YES") +else() + print_config("Cheirality exceptions enabled" "NO") +endif() + +if(NOT MSVC AND 
NOT XCODE_VERSION) + if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) + print_config("Build with ccache" "Yes") + elseif(CCACHE_FOUND) + print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") + else() + print_config("Build with ccache" "No") + endif() +endif() + +message(STATUS "Packaging flags") +print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") +print_config("CPack Generator" "${CPACK_GENERATOR}") + +message(STATUS "GTSAM flags ") +print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") +print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") +print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") +print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") + +message(STATUS "MATLAB toolbox flags") +print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") +if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) + print_config("MATLAB root" "${MATLAB_ROOT}") + print_config("MEX binary" "${MEX_COMMAND}") +endif() +message(STATUS "Python toolbox flags ") +print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ") +if(GTSAM_BUILD_PYTHON) + print_config("Python version" ${GTSAM_PYTHON_VERSION}) +endif() + +message(STATUS "===============================================================") diff --git a/cmake/handle_python.cmake b/cmake/handle_python.cmake new file mode 100644 index 000000000..ac7401906 --- /dev/null +++ b/cmake/handle_python.cmake @@ -0,0 +1,26 @@ +if(GTSAM_BUILD_PYTHON) + if(${GTSAM_PYTHON_VERSION} STREQUAL "Default") + # Get info about the Python3 interpreter + # 
https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 + find_package(Python3 COMPONENTS Interpreter Development) + + if(NOT ${Python3_FOUND}) + message(FATAL_ERROR "Cannot find Python3 interpreter. Please install Python >= 3.6.") + endif() + + set(GTSAM_PYTHON_VERSION "${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}" + CACHE + STRING + "The version of Python to build the wrappers against." + FORCE) + endif() + + if(GTSAM_UNSTABLE_BUILD_PYTHON) + if (NOT GTSAM_BUILD_UNSTABLE) + message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") + set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) + endif() + endif() + + set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") +endif() diff --git a/cmake/handle_tbb.cmake b/cmake/handle_tbb.cmake new file mode 100644 index 000000000..cedee55ea --- /dev/null +++ b/cmake/handle_tbb.cmake @@ -0,0 +1,24 @@ +############################################################################### +# Find TBB +find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) + +# Set up variables if we're using TBB +if(TBB_FOUND AND GTSAM_WITH_TBB) + set(GTSAM_USE_TBB 1) # This will go into config.h + if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) + set(TBB_GREATER_EQUAL_2020 1) + else() + set(TBB_GREATER_EQUAL_2020 0) + endif() + # all definitions and link requisites will go via imported targets: + # tbb & tbbmalloc + list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) +else() + set(GTSAM_USE_TBB 0) # This will go into config.h +endif() + +############################################################################### +# Prohibit Timing build mode in combination with TBB +if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing")) + message(FATAL_ERROR "Timing build mode cannot be used together with TBB. 
Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") +endif() diff --git a/cmake/handle_uninstall.cmake b/cmake/handle_uninstall.cmake new file mode 100644 index 000000000..1859b0273 --- /dev/null +++ b/cmake/handle_uninstall.cmake @@ -0,0 +1,10 @@ +# ---------------------------------------------------------------------------- +# Uninstall target, for "make uninstall" +# ---------------------------------------------------------------------------- +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" + IMMEDIATE @ONLY) + +add_custom_target(uninstall + "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt index 9abd4e31d..52d56a2b5 100644 --- a/matlab/CMakeLists.txt +++ b/matlab/CMakeLists.txt @@ -2,6 +2,10 @@ include(GtsamMatlabWrap) +# Record the root dir for gtsam - needed during external builds, e.g., ROS +set(GTSAM_SOURCE_ROOT_DIR ${GTSAM_SOURCE_DIR}) +message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]") + # Tests #message(STATUS "Installing Matlab Toolbox") install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "*.m;*.fig") @@ -21,7 +25,7 @@ install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "README-gtsam-toolbox. 
file(GLOB matlab_examples_data_graph "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.graph") file(GLOB matlab_examples_data_mat "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.mat") file(GLOB matlab_examples_data_txt "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.txt") -set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt}) +set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt}) if(GTSAM_BUILD_TYPE_POSTFIXES) foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) string(TOUPPER "${build_type}" build_type_upper) @@ -38,4 +42,3 @@ if(GTSAM_BUILD_TYPE_POSTFIXES) else() install(FILES ${matlab_examples_data} DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH}/gtsam_examples/Data) endif() - From 8b2b7476e1d304ac7a09d5315ee13674fe7b5f40 Mon Sep 17 00:00:00 2001 From: Jose Luis Blanco Claraco Date: Tue, 6 Oct 2020 22:58:21 +0200 Subject: [PATCH 27/38] Remove obsolete comments --- cmake/handle_eigen.cmake | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cmake/handle_eigen.cmake b/cmake/handle_eigen.cmake index 690da6971..4aaf4f2ef 100644 --- a/cmake/handle_eigen.cmake +++ b/cmake/handle_eigen.cmake @@ -1,11 +1,5 @@ ############################################################################### # Option for using system Eigen or GTSAM-bundled Eigen -### These patches only affect usage of MKL. If you want to enable MKL, you *must* -### use our patched version of Eigen -### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection) -### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code) -option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. 
If 'off', use the one bundled with GTSAM" OFF) -option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) # Switch for using system Eigen or GTSAM-bundled Eigen if(GTSAM_USE_SYSTEM_EIGEN) From b1c2e0174b5cdc20470354b308ff6c2cd2a883d3 Mon Sep 17 00:00:00 2001 From: Jose Luis Blanco Claraco Date: Tue, 6 Oct 2020 22:58:42 +0200 Subject: [PATCH 28/38] Use system eigen3 only if first quietly found. --- cmake/handle_eigen.cmake | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmake/handle_eigen.cmake b/cmake/handle_eigen.cmake index 4aaf4f2ef..69111303d 100644 --- a/cmake/handle_eigen.cmake +++ b/cmake/handle_eigen.cmake @@ -1,6 +1,22 @@ ############################################################################### # Option for using system Eigen or GTSAM-bundled Eigen +# Default: Use system Eigen if it's present: +find_package(Eigen3 QUIET) +if (Eigen3_FOUND) + set(SYS_EIGEN3_DEFAULT_ ON) +else() + set(SYS_EIGEN3_DEFAULT_ OFF) +endif() +option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. 
If 'off', use the one bundled with GTSAM" ${SYS_EIGEN3_DEFAULT_}) +unset(SYS_EIGEN3_DEFAULT_) + +if(NOT GTSAM_USE_SYSTEM_EIGEN) + # This option only makes sense if using the embedded copy of Eigen, it is + # used to decide whether to *install* the "unsupported" module: + option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) +endif() + # Switch for using system Eigen or GTSAM-bundled Eigen if(GTSAM_USE_SYSTEM_EIGEN) find_package(Eigen3 REQUIRED) From f9e7c7d942618c346d96704a7f0b0880e526b0a8 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 7 Oct 2020 01:42:57 -0400 Subject: [PATCH 29/38] Squashed 'wrap/' changes from 5e1373486..2192b194e 2192b194e Merge pull request #8 from borglab/fix/serialization 3a3461a35 Fix test ce3d5c35d Fix serialization git-subtree-dir: wrap git-subtree-split: 2192b194edc35142e529adcf50ed5e6803d48975 --- pybind_wrapper.py | 4 ++-- tests/expected-python/geometry_pybind.cpp | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pybind_wrapper.py b/pybind_wrapper.py index 3624d06df..8022a2f77 100755 --- a/pybind_wrapper.py +++ b/pybind_wrapper.py @@ -74,8 +74,8 @@ class PybindWrapper(object): ) .def("deserialize", []({class_inst} self, string serialized){{ - return gtsam::deserialize(serialized, self); - }}) + gtsam::deserialize(serialized, *self); + }}, py::arg("serialized")) '''.format(class_inst=cpp_class + '*')) is_method = isinstance(method, instantiator.InstantiatedMethod) diff --git a/tests/expected-python/geometry_pybind.cpp b/tests/expected-python/geometry_pybind.cpp index 6e18f83d7..2cf104d34 100644 --- a/tests/expected-python/geometry_pybind.cpp +++ b/tests/expected-python/geometry_pybind.cpp @@ -45,8 +45,8 @@ PYBIND11_MODULE(geometry_py, m_) { ) .def("deserialize", [](gtsam::Point2* self, string serialized){ - return gtsam::deserialize(serialized, self); - }) + gtsam::deserialize(serialized, *self); + }, py::arg("serialized")) ; py::class_>(m_gtsam, "Point3") @@ -59,8 +59,8 @@ 
PYBIND11_MODULE(geometry_py, m_) { ) .def("deserialize", [](gtsam::Point3* self, string serialized){ - return gtsam::deserialize(serialized, self); - }) + gtsam::deserialize(serialized, *self); + }, py::arg("serialized")) .def_static("staticFunction",[](){return gtsam::Point3::staticFunction();}) .def_static("StaticFunctionRet",[]( double z){return gtsam::Point3::StaticFunctionRet(z);}, py::arg("z")); From 82d6b8b66b8a667bde4058b3c8d0a149fcaa1414 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 7 Oct 2020 02:29:11 -0400 Subject: [PATCH 30/38] Resurrect serialization tests --- gtsam/geometry/tests/testSerializationGeometry.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gtsam/geometry/tests/testSerializationGeometry.cpp b/gtsam/geometry/tests/testSerializationGeometry.cpp index f7ff881eb..aa111c3ae 100644 --- a/gtsam/geometry/tests/testSerializationGeometry.cpp +++ b/gtsam/geometry/tests/testSerializationGeometry.cpp @@ -57,7 +57,7 @@ static StereoCamera cam2(pose3, cal4ptr); static StereoPoint2 spt(1.0, 2.0, 3.0); /* ************************************************************************* */ -TEST_DISABLED (Serialization, text_geometry) { +TEST (Serialization, text_geometry) { EXPECT(equalsObj(Point2(1.0, 2.0))); EXPECT(equalsObj(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsObj(Rot2::fromDegrees(30.0))); @@ -82,7 +82,7 @@ TEST_DISABLED (Serialization, text_geometry) { } /* ************************************************************************* */ -TEST_DISABLED (Serialization, xml_geometry) { +TEST (Serialization, xml_geometry) { EXPECT(equalsXML(Point2(1.0, 2.0))); EXPECT(equalsXML(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsXML(Rot2::fromDegrees(30.0))); @@ -106,7 +106,7 @@ TEST_DISABLED (Serialization, xml_geometry) { } /* ************************************************************************* */ -TEST_DISABLED (Serialization, binary_geometry) { +TEST (Serialization, binary_geometry) { EXPECT(equalsBinary(Point2(1.0, 2.0))); 
EXPECT(equalsBinary(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsBinary(Rot2::fromDegrees(30.0))); From 114f069f234445bf0f2904c56be58615f3137da3 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 7 Oct 2020 02:29:29 -0400 Subject: [PATCH 31/38] Add unit test for python serdes --- python/gtsam/tests/test_Pose3.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index 138f1ff13..e07b904a9 100644 --- a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -65,6 +65,14 @@ class TestPose3(GtsamTestCase): actual = Pose3.adjoint_(xi, xi) np.testing.assert_array_equal(actual, expected) + def test_serialization(self): + """Test if serialization is working normally""" + expected = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0)) + actual = Pose3() + serialized = expected.serialize() + actual.deserialize(serialized) + self.gtsamAssertEquals(expected, actual, 1e-10) + if __name__ == "__main__": unittest.main() From 16418e2fa6493d8a8983ea2b1aaa2aca86ad2bab Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 7 Oct 2020 02:29:41 -0400 Subject: [PATCH 32/38] Squashed 'wrap/' changes from 2192b194e..dfa624e77 dfa624e77 Merge pull request #9 from borglab/fix/serialization 7849665a7 Fix serialization git-subtree-dir: wrap git-subtree-split: dfa624e77e24ce3391d23c614d732fc81b4e6193 --- pybind_wrapper.py | 2 +- tests/expected-python/geometry_pybind.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pybind_wrapper.py b/pybind_wrapper.py index 8022a2f77..326d9be52 100755 --- a/pybind_wrapper.py +++ b/pybind_wrapper.py @@ -69,7 +69,7 @@ class PybindWrapper(object): return textwrap.dedent(''' .def("serialize", []({class_inst} self){{ - return gtsam::serialize(self); + return gtsam::serialize(*self); }} ) .def("deserialize", diff --git a/tests/expected-python/geometry_pybind.cpp b/tests/expected-python/geometry_pybind.cpp index 2cf104d34..3eee55bf4 100644 --- 
a/tests/expected-python/geometry_pybind.cpp +++ b/tests/expected-python/geometry_pybind.cpp @@ -40,7 +40,7 @@ PYBIND11_MODULE(geometry_py, m_) { .def("vectorConfusion",[](gtsam::Point2* self){return self->vectorConfusion();}) .def("serialize", [](gtsam::Point2* self){ - return gtsam::serialize(self); + return gtsam::serialize(*self); } ) .def("deserialize", @@ -54,7 +54,7 @@ PYBIND11_MODULE(geometry_py, m_) { .def("norm",[](gtsam::Point3* self){return self->norm();}) .def("serialize", [](gtsam::Point3* self){ - return gtsam::serialize(self); + return gtsam::serialize(*self); } ) .def("deserialize", From 8cb22624e0520315f402cf2255857301634a9041 Mon Sep 17 00:00:00 2001 From: Jose Luis Blanco Claraco Date: Wed, 7 Oct 2020 17:02:39 +0200 Subject: [PATCH 33/38] Use camel case for cmake files --- CMakeLists.txt | 30 +++++++++---------- ...llocators.cmake => HandleAllocators.cmake} | 0 .../{handle_boost.cmake => HandleBoost.cmake} | 0 ...handle_ccache.cmake => HandleCCache.cmake} | 0 .../{handle_cpack.cmake => HandleCPack.cmake} | 0 .../{handle_eigen.cmake => HandleEigen.cmake} | 0 ...l_checks.cmake => HandleFinalChecks.cmake} | 0 ...tions.cmake => HandleGeneralOptions.cmake} | 0 ...ags.cmake => HandleGlobalBuildFlags.cmake} | 0 cmake/{handle_mkl.cmake => HandleMKL.cmake} | 0 ...handle_openmp.cmake => HandleOpenMP.cmake} | 0 ..._perftools.cmake => HandlePerfTools.cmake} | 0 ...n.cmake => HandlePrintConfiguration.cmake} | 0 ...handle_python.cmake => HandlePython.cmake} | 0 cmake/{handle_tbb.cmake => HandleTBB.cmake} | 0 ..._uninstall.cmake => HandleUninstall.cmake} | 0 16 files changed, 15 insertions(+), 15 deletions(-) rename cmake/{handle_allocators.cmake => HandleAllocators.cmake} (100%) rename cmake/{handle_boost.cmake => HandleBoost.cmake} (100%) rename cmake/{handle_ccache.cmake => HandleCCache.cmake} (100%) rename cmake/{handle_cpack.cmake => HandleCPack.cmake} (100%) rename cmake/{handle_eigen.cmake => HandleEigen.cmake} (100%) rename 
cmake/{handle_final_checks.cmake => HandleFinalChecks.cmake} (100%) rename cmake/{handle_general_options.cmake => HandleGeneralOptions.cmake} (100%) rename cmake/{handle_global_build_flags.cmake => HandleGlobalBuildFlags.cmake} (100%) rename cmake/{handle_mkl.cmake => HandleMKL.cmake} (100%) rename cmake/{handle_openmp.cmake => HandleOpenMP.cmake} (100%) rename cmake/{handle_perftools.cmake => HandlePerfTools.cmake} (100%) rename cmake/{handle_print_configuration.cmake => HandlePrintConfiguration.cmake} (100%) rename cmake/{handle_python.cmake => HandlePython.cmake} (100%) rename cmake/{handle_tbb.cmake => HandleTBB.cmake} (100%) rename cmake/{handle_uninstall.cmake => HandleUninstall.cmake} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 831ee00f3..35c487fd3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,21 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. 
") endif() -include(cmake/handle_boost.cmake) # Boost -include(cmake/handle_ccache.cmake) # ccache -include(cmake/handle_cpack.cmake) # CPack -include(cmake/handle_eigen.cmake) # Eigen3 -include(cmake/handle_general_options.cmake) # CMake build options -include(cmake/handle_mkl.cmake) # MKL -include(cmake/handle_openmp.cmake) # OpenMP -include(cmake/handle_perftools.cmake) # Google perftools -include(cmake/handle_python.cmake) # Python options and commands -include(cmake/handle_tbb.cmake) # TBB -include(cmake/handle_uninstall.cmake) # for "make uninstall" +include(cmake/HandleBoost.cmake) # Boost +include(cmake/HandleCCache.cmake) # ccache +include(cmake/HandleCPack.cmake) # CPack +include(cmake/HandleEigen.cmake) # Eigen3 +include(cmake/HandleGeneralOptions.cmake) # CMake build options +include(cmake/HandleMKL.cmake) # MKL +include(cmake/HandleOpenMP.cmake) # OpenMP +include(cmake/HandlePerfTools.cmake) # Google perftools +include(cmake/HandlePython.cmake) # Python options and commands +include(cmake/HandleTBB.cmake) # TBB +include(cmake/HandleUninstall.cmake) # for "make uninstall" -include(cmake/handle_allocators.cmake) # Must be after tbb, pertools +include(cmake/HandleAllocators.cmake) # Must be after tbb, pertools -include(cmake/handle_global_build_flags.cmake) # Build flags +include(cmake/HandleGlobalBuildFlags.cmake) # Build flags ############################################################################### # Add components @@ -104,10 +104,10 @@ endif() add_subdirectory(cmake) # Print configuration variables -include(cmake/handle_print_configuration.cmake) +include(cmake/HandlePrintConfiguration.cmake) # Print warnings at the end -include(cmake/handle_final_checks.cmake) +include(cmake/HandleFinalChecks.cmake) # Include CPack *after* all flags include(CPack) diff --git a/cmake/handle_allocators.cmake b/cmake/HandleAllocators.cmake similarity index 100% rename from cmake/handle_allocators.cmake rename to cmake/HandleAllocators.cmake diff --git 
a/cmake/handle_boost.cmake b/cmake/HandleBoost.cmake similarity index 100% rename from cmake/handle_boost.cmake rename to cmake/HandleBoost.cmake diff --git a/cmake/handle_ccache.cmake b/cmake/HandleCCache.cmake similarity index 100% rename from cmake/handle_ccache.cmake rename to cmake/HandleCCache.cmake diff --git a/cmake/handle_cpack.cmake b/cmake/HandleCPack.cmake similarity index 100% rename from cmake/handle_cpack.cmake rename to cmake/HandleCPack.cmake diff --git a/cmake/handle_eigen.cmake b/cmake/HandleEigen.cmake similarity index 100% rename from cmake/handle_eigen.cmake rename to cmake/HandleEigen.cmake diff --git a/cmake/handle_final_checks.cmake b/cmake/HandleFinalChecks.cmake similarity index 100% rename from cmake/handle_final_checks.cmake rename to cmake/HandleFinalChecks.cmake diff --git a/cmake/handle_general_options.cmake b/cmake/HandleGeneralOptions.cmake similarity index 100% rename from cmake/handle_general_options.cmake rename to cmake/HandleGeneralOptions.cmake diff --git a/cmake/handle_global_build_flags.cmake b/cmake/HandleGlobalBuildFlags.cmake similarity index 100% rename from cmake/handle_global_build_flags.cmake rename to cmake/HandleGlobalBuildFlags.cmake diff --git a/cmake/handle_mkl.cmake b/cmake/HandleMKL.cmake similarity index 100% rename from cmake/handle_mkl.cmake rename to cmake/HandleMKL.cmake diff --git a/cmake/handle_openmp.cmake b/cmake/HandleOpenMP.cmake similarity index 100% rename from cmake/handle_openmp.cmake rename to cmake/HandleOpenMP.cmake diff --git a/cmake/handle_perftools.cmake b/cmake/HandlePerfTools.cmake similarity index 100% rename from cmake/handle_perftools.cmake rename to cmake/HandlePerfTools.cmake diff --git a/cmake/handle_print_configuration.cmake b/cmake/HandlePrintConfiguration.cmake similarity index 100% rename from cmake/handle_print_configuration.cmake rename to cmake/HandlePrintConfiguration.cmake diff --git a/cmake/handle_python.cmake b/cmake/HandlePython.cmake similarity index 100% rename from 
cmake/handle_python.cmake rename to cmake/HandlePython.cmake diff --git a/cmake/handle_tbb.cmake b/cmake/HandleTBB.cmake similarity index 100% rename from cmake/handle_tbb.cmake rename to cmake/HandleTBB.cmake diff --git a/cmake/handle_uninstall.cmake b/cmake/HandleUninstall.cmake similarity index 100% rename from cmake/handle_uninstall.cmake rename to cmake/HandleUninstall.cmake From 69b2cacbe7d408e92de011a82116007bcf03f590 Mon Sep 17 00:00:00 2001 From: Jose Luis Blanco Claraco Date: Wed, 7 Oct 2020 17:03:20 +0200 Subject: [PATCH 34/38] Revert use system Eigen if found --- cmake/HandleEigen.cmake | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/cmake/HandleEigen.cmake b/cmake/HandleEigen.cmake index 69111303d..fda441907 100644 --- a/cmake/HandleEigen.cmake +++ b/cmake/HandleEigen.cmake @@ -1,15 +1,7 @@ ############################################################################### # Option for using system Eigen or GTSAM-bundled Eigen -# Default: Use system Eigen if it's present: -find_package(Eigen3 QUIET) -if (Eigen3_FOUND) - set(SYS_EIGEN3_DEFAULT_ ON) -else() - set(SYS_EIGEN3_DEFAULT_ OFF) -endif() -option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" ${SYS_EIGEN3_DEFAULT_}) -unset(SYS_EIGEN3_DEFAULT_) +option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. 
If 'off', use the one bundled with GTSAM" OFF) if(NOT GTSAM_USE_SYSTEM_EIGEN) # This option only makes sense if using the embedded copy of Eigen, it is From d4c801bb6bff04f924edccc847f97359b0906217 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 10 Oct 2020 12:39:05 -0400 Subject: [PATCH 35/38] Fix LLVM repo keys --- .github/workflows/build-linux.yml | 2 ++ .github/workflows/build-python.yml | 2 ++ .github/workflows/build-special.yml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index afe328c3b..7553675c8 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -50,6 +50,8 @@ jobs: run: | # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 + gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi sudo apt-get -y update diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index b8d6bc311..d4796e2bb 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -63,6 +63,8 @@ jobs: run: | # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. 
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 + gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi sudo apt-get -y update diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index 648365f24..c314acb16 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -56,6 +56,8 @@ jobs: run: | # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 + gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi sudo apt-get -y update From 22e64ac82d6dee47ba3640b442d3e0c85d4997ee Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Mon, 12 Oct 2020 11:54:24 -0400 Subject: [PATCH 36/38] Add comments --- .github/workflows/build-linux.yml | 3 ++- .github/workflows/build-python.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 7553675c8..2195ad05c 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -48,8 +48,9 @@ jobs: - name: Install (Linux) if: runner.os == 'Linux' run: | - # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. 
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index d4796e2bb..e348e3125 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -61,8 +61,9 @@ jobs: - name: Install (Linux) if: runner.os == 'Linux' run: | - # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" From d4bdaf80800487e96c56f59d748833598f1cecff Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 14 Oct 2020 16:42:54 -0400 Subject: [PATCH 37/38] Add comments --- .github/workflows/build-linux.yml | 1 + .github/workflows/build-python.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 2195ad05c..7aa818d04 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -50,6 +50,7 @@ jobs: run: | # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. 
if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # pool.sks-keyservers.net is the SKS GPG global keyserver pool # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index e348e3125..5b9f7418b 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -63,6 +63,7 @@ jobs: run: | # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # pool.sks-keyservers.net is the SKS GPG global keyserver pool # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - From 2b8e9f44fada685d7b73fd47d84c15b35b449785 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Wed, 14 Oct 2020 16:44:28 -0400 Subject: [PATCH 38/38] Add comments --- .github/workflows/build-linux.yml | 1 + .github/workflows/build-python.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 7aa818d04..d90aad13c 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -52,6 +52,7 @@ jobs: if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then # pool.sks-keyservers.net is the SKS GPG global keyserver pool # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository + # This key is not in the keystore by default for Ubuntu so we need to add it. 
gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 5b9f7418b..724f56dda 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -65,6 +65,7 @@ jobs: if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then # pool.sks-keyservers.net is the SKS GPG global keyserver pool # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository + # This key is not in the keystore by default for Ubuntu so we need to add it. gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"