From d0279d2738548b8e25aca2b9a39c172b99a561a6 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 11:57:31 -0500 Subject: [PATCH 01/78] Add pybind11/stl.h and get it compiling --- gtsam/base/base.i | 36 ----------------------------- python/gtsam/gtsam.tpl | 1 + python/gtsam/specializations/base.h | 3 --- 3 files changed, 1 insertion(+), 39 deletions(-) diff --git a/gtsam/base/base.i b/gtsam/base/base.i index 9838f97d3..f4184fa12 100644 --- a/gtsam/base/base.i +++ b/gtsam/base/base.i @@ -41,45 +41,9 @@ class DSFMap { std::map sets(); }; -class IndexPairSet { - IndexPairSet(); - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - void insert(gtsam::IndexPair key); - bool erase(gtsam::IndexPair key); // returns true if value was removed - bool count(gtsam::IndexPair key) const; // returns true if value exists -}; - -class IndexPairVector { - IndexPairVector(); - IndexPairVector(const gtsam::IndexPairVector& other); - - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - gtsam::IndexPair at(size_t i) const; - void push_back(gtsam::IndexPair key) const; -}; gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set); -class IndexPairSetMap { - IndexPairSetMap(); - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - gtsam::IndexPairSet at(gtsam::IndexPair& key); -}; #include bool linear_independent(Matrix A, Matrix B, double tol); diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl index b760e4eb5..bdeb2bccc 100644 --- a/python/gtsam/gtsam.tpl +++ b/python/gtsam/gtsam.tpl @@ -11,6 +11,7 @@ {include_boost} #include +#include #include #include #include diff --git a/python/gtsam/specializations/base.h b/python/gtsam/specializations/base.h index 9eefdeca8..995753da1 100644 --- a/python/gtsam/specializations/base.h +++ b/python/gtsam/specializations/base.h @@ -11,7 +11,4 @@ * and saves one copy operation. */ -py::bind_map(m_, "IndexPairSetMap"); -py::bind_vector(m_, "IndexPairVector"); - py::bind_vector >(m_, "JacobianVector"); From 394bb82dba39cb8b8f456612957cc1f8025dd19b Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 12:14:31 -0500 Subject: [PATCH 02/78] remove unnecessary includes --- python/gtsam/preamble/basis.h | 2 -- python/gtsam/preamble/discrete.h | 2 -- python/gtsam/preamble/inference.h | 2 -- 3 files changed, 6 deletions(-) diff --git a/python/gtsam/preamble/basis.h b/python/gtsam/preamble/basis.h index 56a07cfdd..d07a75f6f 100644 --- a/python/gtsam/preamble/basis.h +++ b/python/gtsam/preamble/basis.h @@ -10,5 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ - -#include diff --git a/python/gtsam/preamble/discrete.h b/python/gtsam/preamble/discrete.h index 608508c32..598ab626d 100644 --- a/python/gtsam/preamble/discrete.h +++ b/python/gtsam/preamble/discrete.h @@ -11,6 +11,4 @@ * mutations on Python side will not be reflected on C++. */ -#include - PYBIND11_MAKE_OPAQUE(gtsam::DiscreteKeys); diff --git a/python/gtsam/preamble/inference.h b/python/gtsam/preamble/inference.h index 320e0ac71..bd71c0817 100644 --- a/python/gtsam/preamble/inference.h +++ b/python/gtsam/preamble/inference.h @@ -11,5 +11,3 @@ * mutations on Python side will not be reflected on C++. 
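 *
 * A short sketch of what the automatic pybind11/stl.h conversion means for
 * callers (here `factor.keys()` stands in for any wrapped call that returns a
 * C++ container such as gtsam::KeyVector):
 *
 * ```
 * keys = factor.keys()   # converted to a new Python list, i.e. a copy
 * keys.append(99)        # modifies only the copy, not the factor's keys
 * ```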
*/ -#include - From 19287ad5c889b08cf64855fe48b7a89bd0d4e174 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 12:15:43 -0500 Subject: [PATCH 03/78] fix geometry modules --- gtsam/geometry/geometry.i | 54 ------------------------- python/gtsam/preamble/geometry.h | 12 ++---- python/gtsam/specializations/geometry.h | 7 ---- 3 files changed, 3 insertions(+), 70 deletions(-) diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 1e42966f8..97ffe6f18 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -29,38 +29,6 @@ class Point2 { void serialize() const; }; -class Point2Pairs { - Point2Pairs(); - size_t size() const; - bool empty() const; - gtsam::Point2Pair at(size_t n) const; - void push_back(const gtsam::Point2Pair& point_pair); -}; - -// std::vector -class Point2Vector { - // Constructors - Point2Vector(); - Point2Vector(const gtsam::Point2Vector& v); - - // Capacity - size_t size() const; - size_t max_size() const; - void resize(size_t sz); - size_t capacity() const; - bool empty() const; - void reserve(size_t n); - - // Element access - gtsam::Point2 at(size_t n) const; - gtsam::Point2 front() const; - gtsam::Point2 back() const; - - // Modifiers - void assign(size_t n, const gtsam::Point2& u); - void push_back(const gtsam::Point2& x); - void pop_back(); -}; #include class StereoPoint2 { @@ -127,13 +95,6 @@ class Point3 { void serialize() const; }; -class Point3Pairs { - Point3Pairs(); - size_t size() const; - bool empty() const; - gtsam::Point3Pair at(size_t n) const; - void push_back(const gtsam::Point3Pair& point_pair); -}; #include class Rot2 { @@ -486,21 +447,6 @@ class Pose3 { void serialize() const; }; -class Pose3Pairs { - Pose3Pairs(); - size_t size() const; - bool empty() const; - gtsam::Pose3Pair at(size_t n) const; - void push_back(const gtsam::Pose3Pair& pose_pair); -}; - -class Pose3Vector { - Pose3Vector(); - size_t size() const; - bool empty() const; - gtsam::Pose3 at(size_t n) const; - void push_back(const gtsam::Pose3& pose); -}; #include class Unit3 { diff --git a/python/gtsam/preamble/geometry.h b/python/gtsam/preamble/geometry.h index 35fe2a577..ca03cdfe2 100644 --- a/python/gtsam/preamble/geometry.h +++ b/python/gtsam/preamble/geometry.h @@ -10,7 +10,6 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ -#include // Support for binding boost::optional types in C++11. // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html @@ -19,12 +18,7 @@ namespace pybind11 { namespace detail { struct type_caster> : optional_caster> {}; }} -PYBIND11_MAKE_OPAQUE( - std::vector>); -PYBIND11_MAKE_OPAQUE(gtsam::Point2Pairs); -PYBIND11_MAKE_OPAQUE(gtsam::Point3Pairs); -PYBIND11_MAKE_OPAQUE(gtsam::Pose3Pairs); -PYBIND11_MAKE_OPAQUE(std::vector); -PYBIND11_MAKE_OPAQUE( - gtsam::CameraSet>); PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); +PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); +PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); +PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); diff --git a/python/gtsam/specializations/geometry.h b/python/gtsam/specializations/geometry.h index a492ce8eb..f63e942a2 100644 --- a/python/gtsam/specializations/geometry.h +++ b/python/gtsam/specializations/geometry.h @@ -11,13 +11,6 @@ * and saves one copy operation. 
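 *
 * With the explicit Point2Vector / Point2Pairs / Pose3Vector bindings removed,
 * plain Python lists are converted automatically by pybind11/stl.h. A sketch
 * mirroring the updated python tests:
 *
 * ```
 * poses = [pose1, pose2]                                     # was gtsam.Pose3Vector([...])
 * measurements = [cam.project(landmark) for cam in cameras]  # was gtsam.Point2Vector([...])
 * point = gtsam.triangulatePoint3(poses, shared_cal, measurements,
 *                                 rank_tol=1e-9, optimize=False)
 * ```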
*/ -py::bind_vector< - std::vector>>( - m_, "Point2Vector"); -py::bind_vector>(m_, "Point2Pairs"); -py::bind_vector>(m_, "Point3Pairs"); -py::bind_vector>(m_, "Pose3Pairs"); -py::bind_vector>(m_, "Pose3Vector"); py::bind_vector>>( m_, "CameraSetCal3_S2"); py::bind_vector>>( From f5da8522221b6d6e2e5f7de0eeec890f08074a5c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 12:49:06 -0500 Subject: [PATCH 04/78] remove KeyVector --- gtsam/gtsam.i | 54 ---------------------------- python/gtsam/preamble/gtsam.h | 5 --- python/gtsam/specializations/gtsam.h | 8 ----- 3 files changed, 67 deletions(-) diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i index d4e959c3d..45bd61d0b 100644 --- a/gtsam/gtsam.i +++ b/gtsam/gtsam.i @@ -66,27 +66,6 @@ class KeySet { void serialize() const; }; -// Actually a vector -class KeyVector { - KeyVector(); - KeyVector(const gtsam::KeyVector& other); - - // Note: no print function - - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - size_t at(size_t i) const; - size_t front() const; - size_t back() const; - void push_back(size_t key) const; - - void serialize() const; -}; - // Actually a FastMap class KeyGroupMap { KeyGroupMap(); @@ -104,39 +83,6 @@ class KeyGroupMap { bool insert2(size_t key, int val); }; -// Actually a FastSet -class FactorIndexSet { - FactorIndexSet(); - FactorIndexSet(const gtsam::FactorIndexSet& set); - - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - void insert(size_t factorIndex); - bool erase(size_t factorIndex); // returns true if value was removed - bool count(size_t factorIndex) const; // returns true if value exists -}; - -// Actually a vector -class FactorIndices { - FactorIndices(); - FactorIndices(const gtsam::FactorIndices& other); - - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - size_t at(size_t i) const; - size_t front() const; - size_t back() const; - void push_back(size_t factorIndex) const; -}; - //************************************************************************* // Utilities //************************************************************************* diff --git a/python/gtsam/preamble/gtsam.h b/python/gtsam/preamble/gtsam.h index ec39c410a..d07a75f6f 100644 --- a/python/gtsam/preamble/gtsam.h +++ b/python/gtsam/preamble/gtsam.h @@ -10,8 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ -#ifdef GTSAM_ALLOCATOR_TBB -PYBIND11_MAKE_OPAQUE(std::vector>); -#else -PYBIND11_MAKE_OPAQUE(std::vector); -#endif diff --git a/python/gtsam/specializations/gtsam.h b/python/gtsam/specializations/gtsam.h index 490d71902..da8842eaf 100644 --- a/python/gtsam/specializations/gtsam.h +++ b/python/gtsam/specializations/gtsam.h @@ -10,11 +10,3 @@ * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, * and saves one copy operation. 
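 *
 * With the explicit KeyVector binding removed, wrapped functions taking a
 * gtsam::KeyVector accept a plain Python list of keys, which pybind11/stl.h
 * copies on conversion. A sketch mirroring the updated tests:
 *
 * ```
 * graph.add(gtsam.KarcherMeanFactorRot3([1, 2]))             # was gtsam.KeyVector([1, 2])
 * cf = gtsam.CustomFactor(noise_model, [0, 1], error_func)   # keys passed as a plain list
 * ```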
*/ - -#ifdef GTSAM_ALLOCATOR_TBB -py::bind_vector > >(m_, "KeyVector"); -py::implicitly_convertible > >(); -#else -py::bind_vector >(m_, "KeyVector"); -py::implicitly_convertible >(); -#endif From b93145cd89c420a47a95cd0deea29474548dad6a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 13:33:09 -0500 Subject: [PATCH 05/78] remove unneeded code --- gtsam/geometry/Rot3.h | 3 --- gtsam/sfm/sfm.i | 16 ---------------- gtsam/slam/slam.i | 20 +------------------- python/gtsam/gtsam.tpl | 2 +- python/gtsam/preamble/base.h | 1 - python/gtsam/preamble/slam.h | 6 ------ python/gtsam/specializations/sfm.h | 4 ---- python/gtsam/specializations/slam.h | 8 -------- 8 files changed, 2 insertions(+), 58 deletions(-) diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 18bd88b52..d313883d9 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -564,9 +564,6 @@ class GTSAM_EXPORT Rot3 : public LieGroup { #endif }; - /// std::vector of Rot3s, mainly for wrapper - using Rot3Vector = std::vector >; - /** * [RQ] receives a 3 by 3 matrix and returns an upper triangular matrix R * and 3 rotation angles corresponding to the rotation matrix Q=Qz'*Qy'*Qx' diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index 705892e60..31057ec4b 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -29,13 +29,6 @@ class BinaryMeasurement { typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; -class BinaryMeasurementsUnit3 { - BinaryMeasurementsUnit3(); - size_t size() const; - gtsam::BinaryMeasurement at(size_t idx) const; - void push_back(const gtsam::BinaryMeasurement& measurement); -}; - #include // TODO(frank): copy/pasta below until we have integer template paremeters in @@ -178,15 +171,6 @@ class ShonanAveraging3 { #include -class KeyPairDoubleMap { - KeyPairDoubleMap(); - KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other); - - size_t size() const; - bool empty() const; - void clear(); - size_t at(const pair& keypair) const; -}; class MFAS { MFAS(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i index e044dd2c1..da06c0e26 100644 --- a/gtsam/slam/slam.i +++ b/gtsam/slam/slam.i @@ -268,24 +268,6 @@ void save2D(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config, gtsam::noiseModel::Diagonal* model, string filename); -// std::vector::shared_ptr> -// Ignored by pybind -> will be List[BetweenFactorPose2] -class BetweenFactorPose2s { - BetweenFactorPose2s(); - size_t size() const; - gtsam::BetweenFactor* at(size_t i) const; - void push_back(const gtsam::BetweenFactor* factor); -}; -gtsam::BetweenFactorPose2s parse2DFactors(string filename); - -// std::vector::shared_ptr> -// Ignored by pybind -> will be List[BetweenFactorPose3] -class BetweenFactorPose3s { - BetweenFactorPose3s(); - size_t size() const; - gtsam::BetweenFactor* at(size_t i) const; - void push_back(const gtsam::BetweenFactor* factor); -}; gtsam::BetweenFactorPose3s parse3DFactors(string filename); pair load3D(string filename); @@ -323,7 +305,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { KarcherMeanFactor(const gtsam::KeyVector& keys); }; -gtsam::Rot3 FindKarcherMean(const gtsam::Rot3Vector& rotations); +gtsam::Rot3 FindKarcherMean(const std::vector& rotations); #include gtsam::noiseModel::Isotropic* ConvertNoiseModel(gtsam::noiseModel::Base* model, diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl index bdeb2bccc..a974b246b 100644 --- a/python/gtsam/gtsam.tpl 
+++ b/python/gtsam/gtsam.tpl @@ -11,8 +11,8 @@ {include_boost} #include -#include #include +#include #include #include #include diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index 626b47ae4..a7effd1ca 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -12,5 +12,4 @@ */ PYBIND11_MAKE_OPAQUE(std::vector); - PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector diff --git a/python/gtsam/preamble/slam.h b/python/gtsam/preamble/slam.h index f7bf5863c..d07a75f6f 100644 --- a/python/gtsam/preamble/slam.h +++ b/python/gtsam/preamble/slam.h @@ -10,9 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ - -PYBIND11_MAKE_OPAQUE( - std::vector > >); -PYBIND11_MAKE_OPAQUE( - std::vector > >); -PYBIND11_MAKE_OPAQUE(gtsam::Rot3Vector); diff --git a/python/gtsam/specializations/sfm.h b/python/gtsam/specializations/sfm.h index 6de15217f..da8842eaf 100644 --- a/python/gtsam/specializations/sfm.h +++ b/python/gtsam/specializations/sfm.h @@ -10,7 +10,3 @@ * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, * and saves one copy operation. */ - -py::bind_vector > >( - m_, "BinaryMeasurementsUnit3"); -py::bind_map(m_, "KeyPairDoubleMap"); diff --git a/python/gtsam/specializations/slam.h b/python/gtsam/specializations/slam.h index 6a439c370..da8842eaf 100644 --- a/python/gtsam/specializations/slam.h +++ b/python/gtsam/specializations/slam.h @@ -10,11 +10,3 @@ * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, * and saves one copy operation. */ - -py::bind_vector< - std::vector>>>( - m_, "BetweenFactorPose3s"); -py::bind_vector< - std::vector>>>( - m_, "BetweenFactorPose2s"); -py::bind_vector(m_, "Rot3Vector"); From 042c236123a9a804676fc1bcbbd72ba82cdb6390 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 14:08:13 -0500 Subject: [PATCH 06/78] update python tests --- gtsam/base/base.i | 14 +++++++- .../examples/TranslationAveragingExample.py | 11 +++--- python/gtsam/symbol_shorthand.py | 2 +- python/gtsam/tests/test_Cal3Fisheye.py | 13 ++++--- python/gtsam/tests/test_Cal3Unified.py | 13 ++++--- python/gtsam/tests/test_DecisionTreeFactor.py | 3 +- python/gtsam/tests/test_KarcherMeanFactor.py | 11 +++--- python/gtsam/tests/test_Pose2.py | 7 ++-- python/gtsam/tests/test_ShonanAveraging.py | 15 +++----- python/gtsam/tests/test_Sim3.py | 13 ++++--- .../gtsam/tests/test_TranslationRecovery.py | 6 ++-- python/gtsam/tests/test_Triangulation.py | 34 ++++++------------- 12 files changed, 63 insertions(+), 79 deletions(-) diff --git a/gtsam/base/base.i b/gtsam/base/base.i index f4184fa12..fbbd91553 100644 --- a/gtsam/base/base.i +++ b/gtsam/base/base.i @@ -41,10 +41,22 @@ class DSFMap { std::map sets(); }; +class IndexPairVector { + IndexPairVector(); + IndexPairVector(const gtsam::IndexPairVector& other); + + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + gtsam::IndexPair at(size_t i) const; + void push_back(gtsam::IndexPair key) const; +}; gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set); - #include bool linear_independent(Matrix A, Matrix B, double tol); diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 054b61126..a29b7e6a4 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ 
b/python/gtsam/examples/TranslationAveragingExample.py @@ -17,9 +17,8 @@ Date: September 2020 from collections import defaultdict from typing import List, Tuple -import numpy as np - import gtsam +import numpy as np from gtsam.examples import SFMdata # Hyperparameters for 1dsfm, values used from Kyle Wilson's code. @@ -59,7 +58,7 @@ def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: return wRc_values, i_iZj_list -def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3: +def filter_outliers(w_iZj_list: List[gtsam.BinaryMeasurementUnit3]) -> List[gtsam.BinaryMeasurementUnit3]: """Removes outliers from a list of Unit3 measurements that are the translation directions from camera i to camera j in the world frame.""" @@ -89,14 +88,14 @@ def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMe avg_outlier_weights[keypair] += weight / len(outlier_weights) # Remove w_iZj that have weight greater than threshold, these are outliers. - w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() + w_iZj_inliers = [] [w_iZj_inliers.append(w_iZj) for w_iZj in w_iZj_list if avg_outlier_weights[( w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD] return w_iZj_inliers -def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, +def estimate_poses(i_iZj_list: List[gtsam.BinaryMeasurementUnit3], wRc_values: gtsam.Values) -> gtsam.Values: """Estimate poses given rotations and normalized translation directions between cameras. @@ -112,7 +111,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, """ # Convert the translation direction measurements to world frame using the rotations. - w_iZj_list = gtsam.BinaryMeasurementsUnit3() + w_iZj_list = [] for i_iZj in i_iZj_list: w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1()) .rotate(i_iZj.measured().point3())) diff --git a/python/gtsam/symbol_shorthand.py b/python/gtsam/symbol_shorthand.py index 748d36558..5f7866d18 100644 --- a/python/gtsam/symbol_shorthand.py +++ b/python/gtsam/symbol_shorthand.py @@ -1,4 +1,4 @@ # This trick is to allow direct import of sub-modules # without this, we can only do `from gtsam.gtsam.symbol_shorthand import X` # with this trick, we can do `from gtsam.symbol_shorthand import X` -from .gtsam.symbol_shorthand import * \ No newline at end of file +from .gtsam.symbol_shorthand import * diff --git a/python/gtsam/tests/test_Cal3Fisheye.py b/python/gtsam/tests/test_Cal3Fisheye.py index e54afc757..246ec19ee 100644 --- a/python/gtsam/tests/test_Cal3Fisheye.py +++ b/python/gtsam/tests/test_Cal3Fisheye.py @@ -11,11 +11,10 @@ Refactored: Roderick Koehle """ import unittest -import numpy as np - import gtsam -from gtsam.utils.test_case import GtsamTestCase +import numpy as np from gtsam.symbol_shorthand import K, L, P +from gtsam.utils.test_case import GtsamTestCase def ulp(ftype=np.float64): @@ -51,9 +50,9 @@ class TestCal3Fisheye(GtsamTestCase): camera1 = gtsam.PinholeCameraCal3Fisheye(pose1) camera2 = gtsam.PinholeCameraCal3Fisheye(pose2) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = gtsam.Pose3Vector([pose1, pose2]) - cls.cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2]) - cls.measurements = gtsam.Point2Vector([k.project(cls.origin) for k in cls.cameras]) + cls.poses = [pose1, pose2] + cls.cameras = [camera1, camera2] + cls.measurements = [k.project(cls.origin) for k in cls.cameras] def test_Cal3Fisheye(self): K = gtsam.Cal3Fisheye() @@ -187,7 +186,7 @@ class TestCal3Fisheye(GtsamTestCase): def test_triangulation_rectify(self): 
"""Estimate spatial point from image measurements using rectification""" - rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]) + rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)] shared_cal = gtsam.Cal3_S2() triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False) self.gtsamAssertEquals(triangulated, self.origin) diff --git a/python/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py index bafbacfa4..da9f4a939 100644 --- a/python/gtsam/tests/test_Cal3Unified.py +++ b/python/gtsam/tests/test_Cal3Unified.py @@ -10,11 +10,10 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import unittest -import numpy as np - import gtsam -from gtsam.utils.test_case import GtsamTestCase +import numpy as np from gtsam.symbol_shorthand import K, L, P +from gtsam.utils.test_case import GtsamTestCase class TestCal3Unified(GtsamTestCase): @@ -48,9 +47,9 @@ class TestCal3Unified(GtsamTestCase): camera1 = gtsam.PinholeCameraCal3Unified(pose1, cls.stereographic) camera2 = gtsam.PinholeCameraCal3Unified(pose2, cls.stereographic) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = gtsam.Pose3Vector([pose1, pose2]) - cls.cameras = gtsam.CameraSetCal3Unified([camera1, camera2]) - cls.measurements = gtsam.Point2Vector([k.project(cls.origin) for k in cls.cameras]) + cls.poses = [pose1, pose2] + cls.cameras = [camera1, camera2] + cls.measurements = [k.project(cls.origin) for k in cls.cameras] def test_Cal3Unified(self): K = gtsam.Cal3Unified() @@ -147,7 +146,7 @@ class TestCal3Unified(GtsamTestCase): def test_triangulation_rectify(self): """Estimate spatial point from image measurements using rectification""" - rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]) + rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)] shared_cal = gtsam.Cal3_S2() triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False) self.gtsamAssertEquals(triangulated, self.origin) diff --git a/python/gtsam/tests/test_DecisionTreeFactor.py b/python/gtsam/tests/test_DecisionTreeFactor.py index 0499e7215..1b8598953 100644 --- a/python/gtsam/tests/test_DecisionTreeFactor.py +++ b/python/gtsam/tests/test_DecisionTreeFactor.py @@ -13,7 +13,8 @@ Author: Frank Dellaert import unittest -from gtsam import DecisionTreeFactor, DiscreteValues, DiscreteDistribution, Ordering +from gtsam import (DecisionTreeFactor, DiscreteDistribution, DiscreteValues, + Ordering) from gtsam.utils.test_case import GtsamTestCase diff --git a/python/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py index f4ec64283..9ec33ad8a 100644 --- a/python/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -31,7 +31,7 @@ class TestKarcherMean(GtsamTestCase): def test_find(self): # Check that optimizing for Karcher mean (which minimizes Between distance) # gets correct result. 
- rotations = gtsam.Rot3Vector([R, R.inverse()]) + rotations = [R, R.inverse()] expected = Rot3() actual = gtsam.FindKarcherMean(rotations) self.gtsamAssertEquals(expected, actual) @@ -42,7 +42,7 @@ class TestKarcherMean(GtsamTestCase): a2Rb2 = Rot3() a3Rb3 = Rot3() - aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3]) + aRb_list = [a1Rb1, a2Rb2, a3Rb3] aRb_expected = Rot3() aRb = gtsam.FindKarcherMean(aRb_list) @@ -58,9 +58,7 @@ class TestKarcherMean(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) - keys = gtsam.KeyVector() - keys.append(1) - keys.append(2) + keys = [1, 2] graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() @@ -69,8 +67,7 @@ class TestKarcherMean(GtsamTestCase): expected = Rot3() result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() - actual = gtsam.FindKarcherMean( - gtsam.Rot3Vector([result.atRot3(1), result.atRot3(2)])) + actual = gtsam.FindKarcherMean([result.atRot3(1), result.atRot3(2)]) self.gtsamAssertEquals(expected, actual) self.gtsamAssertEquals( R12, result.atRot3(1).between(result.atRot3(2))) diff --git a/python/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py index e5ffbad7d..d70483482 100644 --- a/python/gtsam/tests/test_Pose2.py +++ b/python/gtsam/tests/test_Pose2.py @@ -10,10 +10,9 @@ Author: Frank Dellaert & Duy Nguyen Ta & John Lambert """ import unittest -import numpy as np - import gtsam -from gtsam import Point2, Point2Pairs, Pose2 +import numpy as np +from gtsam import Point2, Pose2 from gtsam.utils.test_case import GtsamTestCase @@ -55,7 +54,7 @@ class TestPose2(GtsamTestCase): ] # fmt: on - ab_pairs = Point2Pairs(list(zip(pts_a, pts_b))) + ab_pairs = list(zip(pts_a, pts_b)) bTa = gtsam.align(ab_pairs) aTb = bTa.inverse() assert aTb is not None diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py index 19b9f8dc1..036ea401c 100644 --- a/python/gtsam/tests/test_ShonanAveraging.py +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -14,16 +14,9 @@ import unittest import gtsam import numpy as np -from gtsam import ( - BetweenFactorPose2, - LevenbergMarquardtParams, - Rot2, - Pose2, - ShonanAveraging2, - ShonanAveragingParameters2, - ShonanAveraging3, - ShonanAveragingParameters3, -) +from gtsam import (BetweenFactorPose2, LevenbergMarquardtParams, Pose2, Rot2, + ShonanAveraging2, ShonanAveraging3, + ShonanAveragingParameters2, ShonanAveragingParameters3) from gtsam.utils.test_case import GtsamTestCase DEFAULT_PARAMS = ShonanAveragingParameters3( @@ -183,7 +176,7 @@ class TestShonanAveraging(GtsamTestCase): shonan_params.setCertifyOptimality(True) noise_model = gtsam.noiseModel.Unit.Create(3) - between_factors = gtsam.BetweenFactorPose2s() + between_factors = [] for (i1, i2), i2Ri1 in i2Ri1_dict.items(): i2Ti1 = Pose2(i2Ri1, np.zeros(2)) between_factors.append( diff --git a/python/gtsam/tests/test_Sim3.py b/python/gtsam/tests/test_Sim3.py index c00a36435..c3fd9e909 100644 --- a/python/gtsam/tests/test_Sim3.py +++ b/python/gtsam/tests/test_Sim3.py @@ -12,10 +12,9 @@ Author: John Lambert import unittest from typing import List, Optional -import numpy as np - import gtsam -from gtsam import Point3, Pose3, Pose3Pairs, Rot3, Similarity3 +import numpy as np +from gtsam import Point3, Pose3, Rot3, Similarity3 from gtsam.utils.test_case import GtsamTestCase @@ -49,7 +48,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list))) 
+ we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -84,7 +83,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = Pose3Pairs(list(zip(wToi_list, eToi_list))) + we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -122,7 +121,7 @@ class TestSim3(GtsamTestCase): bTi_list = [bTi0, bTi1, bTi2, bTi3] - ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list))) + ab_pairs = list(zip(aTi_list, bTi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) aSb = Similarity3.Align(ab_pairs) @@ -689,7 +688,7 @@ def align_poses_sim3(aTi_list: List[Pose3], bTi_list: List[Pose3]) -> Similarity assert len(aTi_list) == len(bTi_list) assert n_to_align >= 2, "SIM(3) alignment uses at least 2 frames" - ab_pairs = Pose3Pairs(list(zip(aTi_list, bTi_list))) + ab_pairs = list(zip(aTi_list, bTi_list)) aSb = Similarity3.Align(ab_pairs) diff --git a/python/gtsam/tests/test_TranslationRecovery.py b/python/gtsam/tests/test_TranslationRecovery.py index 0fb0518b6..962b3cc9a 100644 --- a/python/gtsam/tests/test_TranslationRecovery.py +++ b/python/gtsam/tests/test_TranslationRecovery.py @@ -1,9 +1,9 @@ from __future__ import print_function -import numpy as np import unittest import gtsam +import numpy as np """ Returns example pose values of 3 points A, B and C in the world frame """ def ExampleValues(): @@ -19,7 +19,7 @@ def ExampleValues(): """ Returns binary measurements for the points in the given edges.""" def SimulateMeasurements(gt_poses, graph_edges): - measurements = gtsam.BinaryMeasurementsUnit3() + measurements = [] for edge in graph_edges: Ta = gt_poses.atPose3(edge[0]).translation() Tb = gt_poses.atPose3(edge[1]).translation() @@ -34,7 +34,7 @@ class TestTranslationRecovery(unittest.TestCase): def test_constructor(self): """Construct from binary measurements.""" - algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3()) + algorithm = gtsam.TranslationRecovery([]) self.assertIsInstance(algorithm, gtsam.TranslationRecovery) def test_run(self): diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py index 0a258a0af..0de0f6d95 100644 --- a/python/gtsam/tests/test_Triangulation.py +++ b/python/gtsam/tests/test_Triangulation.py @@ -11,23 +11,11 @@ Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert import unittest from typing import Iterable, List, Optional, Tuple, Union -import numpy as np - import gtsam -from gtsam import ( - Cal3_S2, - Cal3Bundler, - CameraSetCal3_S2, - CameraSetCal3Bundler, - PinholeCameraCal3_S2, - PinholeCameraCal3Bundler, - Point2, - Point2Vector, - Point3, - Pose3, - Pose3Vector, - Rot3, -) +import numpy as np +from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2, + CameraSetCal3Bundler, PinholeCameraCal3_S2, + PinholeCameraCal3Bundler, Point2, Point3, Pose3, Rot3) from gtsam.utils.test_case import GtsamTestCase UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2) @@ -44,9 +32,7 @@ class TestTriangulationExample(GtsamTestCase): # create second camera 1 meter to the right of first camera pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) # twoPoses - self.poses = Pose3Vector() - self.poses.append(pose1) - self.poses.append(pose2) + self.poses = [pose1, pose2] # landmark ~5 meters infront of camera self.landmark = Point3(5, 0.5, 1.2) @@ -58,7 +44,7 @@ class TestTriangulationExample(GtsamTestCase): 
cal_params: Iterable[Iterable[Union[int, float]]], camera_set: Optional[Union[CameraSetCal3Bundler, CameraSetCal3_S2]] = None, - ) -> Tuple[Point2Vector, Union[CameraSetCal3Bundler, CameraSetCal3_S2, + ) -> Tuple[List[Point2], Union[CameraSetCal3Bundler, CameraSetCal3_S2, List[Cal3Bundler], List[Cal3_S2]]]: """ Generate vector of measurements for given calibration and camera model. @@ -76,7 +62,7 @@ class TestTriangulationExample(GtsamTestCase): cameras = camera_set() else: cameras = [] - measurements = Point2Vector() + measurements = [] for k, pose in zip(cal_params, self.poses): K = calibration(*k) @@ -105,7 +91,7 @@ class TestTriangulationExample(GtsamTestCase): self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) # Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) - measurements_noisy = Point2Vector() + measurements_noisy = [] measurements_noisy.append(measurements[0] - np.array([0.1, 0.5])) measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3])) @@ -172,8 +158,8 @@ class TestTriangulationExample(GtsamTestCase): z2: Point2 = camera2.project(landmark) z3: Point2 = camera3.project(landmark) - poses = gtsam.Pose3Vector([pose1, pose2, pose3]) - measurements = Point2Vector([z1, z2, z3]) + poses = [pose1, pose2, pose3] + measurements = [z1, z2, z3] # noise free, so should give exactly the landmark actual = gtsam.triangulatePoint3(poses, From fe9c5718bbea01f523eb4afb344d5b82f948a484 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 14:09:57 -0500 Subject: [PATCH 07/78] update CMake ignore list --- python/CMakeLists.txt | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 85ddc7b6d..e666dced3 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -34,22 +34,7 @@ set(ignore gtsam::Point2 gtsam::Point3 gtsam::ISAM2ThresholdMapValue - gtsam::FactorIndices - gtsam::FactorIndexSet - gtsam::IndexPairSetMap - gtsam::IndexPairVector - gtsam::BetweenFactorPose2s - gtsam::BetweenFactorPose3s - gtsam::Point2Vector - gtsam::Point2Pairs - gtsam::Point3Pairs - gtsam::Pose3Pairs - gtsam::Pose3Vector - gtsam::Rot3Vector - gtsam::KeyVector - gtsam::BinaryMeasurementsUnit3 - gtsam::DiscreteKey - gtsam::KeyPairDoubleMap) + gtsam::DiscreteKey) set(interface_headers ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i @@ -117,19 +102,11 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) gtsam::Point2 gtsam::Point3 gtsam::ISAM2ThresholdMapValue - gtsam::FactorIndices - gtsam::FactorIndexSet - gtsam::BetweenFactorPose3s - gtsam::Point2Vector - gtsam::Pose3Vector - gtsam::KeyVector gtsam::FixedLagSmootherKeyTimestampMapValue - gtsam::BinaryMeasurementsUnit3 gtsam::CameraSetCal3_S2 gtsam::CameraSetCal3Bundler gtsam::CameraSetCal3Unified - gtsam::CameraSetCal3Fisheye - gtsam::KeyPairDoubleMap) + gtsam::CameraSetCal3Fisheye) pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header From c68c986321d2b88fd17900da87c6302e8c236a5c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 17:54:50 -0500 Subject: [PATCH 08/78] Fix IndexPairVector wrapping --- gtsam/base/base.i | 13 ------------- python/gtsam/preamble/base.h | 1 - 2 files changed, 14 deletions(-) diff --git a/gtsam/base/base.i b/gtsam/base/base.i index fbbd91553..212578736 100644 --- a/gtsam/base/base.i +++ b/gtsam/base/base.i @@ -41,19 +41,6 @@ class DSFMap { std::map sets(); }; -class IndexPairVector { - IndexPairVector(); - 
IndexPairVector(const gtsam::IndexPairVector& other); - - // common STL methods - size_t size() const; - bool empty() const; - void clear(); - - // structure specific methods - gtsam::IndexPair at(size_t i) const; - void push_back(gtsam::IndexPair key) const; -}; gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set); diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index a7effd1ca..affb03ffa 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -11,5 +11,4 @@ * mutations on Python side will not be reflected on C++. */ -PYBIND11_MAKE_OPAQUE(std::vector); PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector From ef912eefd3689edf05133c45e3f74483970456e1 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 18:53:39 -0500 Subject: [PATCH 09/78] minor cleanup --- python/gtsam/preamble/base.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index affb03ffa..d07a75f6f 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -10,5 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ - -PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector From fbe9a21070c90454bfdf9e3c5dc43d6387e64ffc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 31 Jan 2022 18:55:40 -0500 Subject: [PATCH 10/78] attempt to get custom factor tests passing --- gtsam/nonlinear/CustomFactor.cpp | 16 +++++++++---- gtsam/nonlinear/CustomFactor.h | 5 ++-- python/gtsam/tests/test_custom_factor.py | 29 ++++++++++++------------ 3 files changed, 29 insertions(+), 21 deletions(-) diff --git a/gtsam/nonlinear/CustomFactor.cpp b/gtsam/nonlinear/CustomFactor.cpp index e33caed6f..b8368c497 100644 --- a/gtsam/nonlinear/CustomFactor.cpp +++ b/gtsam/nonlinear/CustomFactor.cpp @@ -22,13 +22,14 @@ namespace gtsam { /* * Calculates the unwhitened error by invoking the callback functor (i.e. from Python). */ -Vector CustomFactor::unwhitenedError(const Values& x, boost::optional&> H) const { +Vector CustomFactor::unwhitenedError( + const Values &x, boost::optional&> H) const { if(this->active(x)) { if(H) { /* * In this case, we pass the raw pointer to the `std::vector` object directly to pybind. - * As the type `std::vector` has been marked as opaque in `preamble.h`, any changes in + * As the type `std::vector` has been marked as opaque in `preamble/base.h`, any changes in * Python will be immediately reflected on the C++ side. * * Example: @@ -43,13 +44,20 @@ Vector CustomFactor::unwhitenedError(const Values& x, boost::optionalerror_function_(*this, x, H.get_ptr()); + std::pair errorAndJacobian = + this->error_function_(*this, x, H.get_ptr()); + + Vector error = errorAndJacobian.first; + (*H) = errorAndJacobian.second; + + return error; } else { /* * In this case, we pass the a `nullptr` to pybind, and it will translate to `None` in Python. * Users can check for `None` in their callback to determine if the Jacobian is requested. 
*/ - return this->error_function_(*this, x, nullptr); + auto errorAndJacobian = this->error_function_(*this, x, nullptr); + return errorAndJacobian.first; } } else { return Vector::Zero(this->dim()); diff --git a/gtsam/nonlinear/CustomFactor.h b/gtsam/nonlinear/CustomFactor.h index 615b5418e..6261636b5 100644 --- a/gtsam/nonlinear/CustomFactor.h +++ b/gtsam/nonlinear/CustomFactor.h @@ -35,7 +35,8 @@ class CustomFactor; * This is safe because this is passing a const pointer, and pybind11 will maintain the `std::vector` memory layout. * Thus the pointer will never be invalidated. */ -using CustomErrorFunction = std::function; +using CustomErrorFunction = std::function( + const CustomFactor &, const Values &, JacobianVector *)>; /** * @brief Custom factor that takes a std::function as the error @@ -77,7 +78,7 @@ public: * Calls the errorFunction closure, which is a std::function object * One can check if a derivative is needed in the errorFunction by checking the length of Jacobian array */ - Vector unwhitenedError(const Values &x, boost::optional &> H = boost::none) const override; + Vector unwhitenedError(const Values &x, boost::optional&> H = boost::none) const override; /** print */ void print(const std::string &s, diff --git a/python/gtsam/tests/test_custom_factor.py b/python/gtsam/tests/test_custom_factor.py index 4f0f33361..03e6917f0 100644 --- a/python/gtsam/tests/test_custom_factor.py +++ b/python/gtsam/tests/test_custom_factor.py @@ -8,13 +8,12 @@ See LICENSE for the license information CustomFactor unit tests. Author: Fan Jiang """ -from typing import List import unittest -from gtsam import Values, Pose2, CustomFactor - -import numpy as np +from typing import List import gtsam +import numpy as np +from gtsam import CustomFactor, JacobianFactor, Pose2, Values from gtsam.utils.test_case import GtsamTestCase @@ -24,17 +23,17 @@ class TestCustomFactor(GtsamTestCase): def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): """Minimal error function stub""" - return np.array([1, 0, 0]) + return np.array([1, 0, 0]), H noise_model = gtsam.noiseModel.Unit.Create(3) - cf = CustomFactor(noise_model, gtsam.KeyVector([0]), error_func) + cf = CustomFactor(noise_model, [0], error_func) def test_new_keylist(self): """Test the creation of a new CustomFactor""" def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): """Minimal error function stub""" - return np.array([1, 0, 0]) + return np.array([1, 0, 0]), H noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0], error_func) @@ -47,7 +46,7 @@ class TestCustomFactor(GtsamTestCase): """Minimal error function with no Jacobian""" key0 = this.keys()[0] error = -v.atPose2(key0).localCoordinates(expected_pose) - return error + return error, H noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0], error_func) @@ -81,10 +80,10 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error + return error, H noise_model = gtsam.noiseModel.Unit.Create(3) - cf = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func) + cf = CustomFactor(noise_model, [0, 1], error_func) v = Values() v.insert(0, gT1) v.insert(1, gT2) @@ -104,9 +103,9 @@ class TestCustomFactor(GtsamTestCase): gT1 = Pose2(1, 2, np.pi / 2) gT2 = Pose2(-1, 4, np.pi) - def error_func(this: CustomFactor, v: gtsam.Values, _: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): """Minimal error 
function stub""" - return np.array([1, 0, 0]) + return np.array([1, 0, 0]), H noise_model = gtsam.noiseModel.Unit.Create(3) from gtsam.symbol_shorthand import X @@ -144,10 +143,10 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error + return error, H noise_model = gtsam.noiseModel.Unit.Create(3) - cf = CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func) + cf = CustomFactor(noise_model, [0, 1], error_func) v = Values() v.insert(0, gT1) v.insert(1, gT2) @@ -182,7 +181,7 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error + return error, H noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0, 1], error_func) From b60c5e3ab54f303e35e99535d0744d652698efa2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 17 Jul 2022 16:05:59 -0400 Subject: [PATCH 11/78] undo CustomFactor changes --- gtsam/nonlinear/CustomFactor.cpp | 10 ++-------- gtsam/nonlinear/CustomFactor.h | 5 ++--- python/gtsam/tests/test_custom_factor.py | 14 +++++++------- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/gtsam/nonlinear/CustomFactor.cpp b/gtsam/nonlinear/CustomFactor.cpp index b8368c497..4f86bc757 100644 --- a/gtsam/nonlinear/CustomFactor.cpp +++ b/gtsam/nonlinear/CustomFactor.cpp @@ -44,20 +44,14 @@ Vector CustomFactor::unwhitenedError( * return error * ``` */ - std::pair errorAndJacobian = - this->error_function_(*this, x, H.get_ptr()); + return this->error_function_(*this, x, H.get_ptr()); - Vector error = errorAndJacobian.first; - (*H) = errorAndJacobian.second; - - return error; } else { /* * In this case, we pass the a `nullptr` to pybind, and it will translate to `None` in Python. * Users can check for `None` in their callback to determine if the Jacobian is requested. */ - auto errorAndJacobian = this->error_function_(*this, x, nullptr); - return errorAndJacobian.first; + return this->error_function_(*this, x, nullptr); } } else { return Vector::Zero(this->dim()); diff --git a/gtsam/nonlinear/CustomFactor.h b/gtsam/nonlinear/CustomFactor.h index 6261636b5..615b5418e 100644 --- a/gtsam/nonlinear/CustomFactor.h +++ b/gtsam/nonlinear/CustomFactor.h @@ -35,8 +35,7 @@ class CustomFactor; * This is safe because this is passing a const pointer, and pybind11 will maintain the `std::vector` memory layout. * Thus the pointer will never be invalidated. 
*/ -using CustomErrorFunction = std::function( - const CustomFactor &, const Values &, JacobianVector *)>; +using CustomErrorFunction = std::function; /** * @brief Custom factor that takes a std::function as the error @@ -78,7 +77,7 @@ public: * Calls the errorFunction closure, which is a std::function object * One can check if a derivative is needed in the errorFunction by checking the length of Jacobian array */ - Vector unwhitenedError(const Values &x, boost::optional&> H = boost::none) const override; + Vector unwhitenedError(const Values &x, boost::optional &> H = boost::none) const override; /** print */ void print(const std::string &s, diff --git a/python/gtsam/tests/test_custom_factor.py b/python/gtsam/tests/test_custom_factor.py index 03e6917f0..a3bb00384 100644 --- a/python/gtsam/tests/test_custom_factor.py +++ b/python/gtsam/tests/test_custom_factor.py @@ -33,7 +33,7 @@ class TestCustomFactor(GtsamTestCase): def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): """Minimal error function stub""" - return np.array([1, 0, 0]), H + return np.array([1, 0, 0]) noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0], error_func) @@ -46,7 +46,7 @@ class TestCustomFactor(GtsamTestCase): """Minimal error function with no Jacobian""" key0 = this.keys()[0] error = -v.atPose2(key0).localCoordinates(expected_pose) - return error, H + return error noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0], error_func) @@ -80,7 +80,7 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error, H + return error noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0, 1], error_func) @@ -103,9 +103,9 @@ class TestCustomFactor(GtsamTestCase): gT1 = Pose2(1, 2, np.pi / 2) gT2 = Pose2(-1, 4, np.pi) - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, _: List[np.ndarray]): """Minimal error function stub""" - return np.array([1, 0, 0]), H + return np.array([1, 0, 0]) noise_model = gtsam.noiseModel.Unit.Create(3) from gtsam.symbol_shorthand import X @@ -143,7 +143,7 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error, H + return error noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0, 1], error_func) @@ -181,7 +181,7 @@ class TestCustomFactor(GtsamTestCase): result = gT1.between(gT2) H[0] = -result.inverse().AdjointMap() H[1] = np.eye(3) - return error, H + return error noise_model = gtsam.noiseModel.Unit.Create(3) cf = CustomFactor(noise_model, [0, 1], error_func) From d6415deac4329c37c4995270feac0cd3085753f8 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 19 Jul 2022 12:13:20 -0400 Subject: [PATCH 12/78] minor updates to sfm.h preamble --- python/gtsam/preamble/sfm.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/python/gtsam/preamble/sfm.h b/python/gtsam/preamble/sfm.h index bce2a750d..5ba48f1c8 100644 --- a/python/gtsam/preamble/sfm.h +++ b/python/gtsam/preamble/sfm.h @@ -11,12 +11,5 @@ * mutations on Python side will not be reflected on C++. */ -// Including can cause some serious hard-to-debug bugs!!! 
-// #include -#include - -PYBIND11_MAKE_OPAQUE( - std::vector); - -PYBIND11_MAKE_OPAQUE( - std::vector); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector); From eb6e56a4f889426b4d07ab85052fa42498dd97d8 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 25 Jul 2022 17:20:09 -0400 Subject: [PATCH 13/78] Mark JacobianVector as opaque --- python/gtsam/preamble/base.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index d07a75f6f..b1ed2ba14 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -10,3 +10,5 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ + +PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector From 8d4fb8b854a3d83819980896e25516006af9a09b Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 23 Dec 2022 12:26:11 +0530 Subject: [PATCH 14/78] fix doxygen issues --- gtsam/base/treeTraversal-inst.h | 6 ++++-- gtsam/linear/LossFunctions.h | 26 +++++++++++++------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/gtsam/base/treeTraversal-inst.h b/gtsam/base/treeTraversal-inst.h index be45a248e..0578143f0 100644 --- a/gtsam/base/treeTraversal-inst.h +++ b/gtsam/base/treeTraversal-inst.h @@ -138,6 +138,8 @@ void DepthFirstForest(FOREST& forest, DATA& rootData, VISITOR_PRE& visitorPre) { /** Traverse a forest depth-first with pre-order and post-order visits. * @param forest The forest of trees to traverse. The method \c forest.roots() should exist * and return a collection of (shared) pointers to \c FOREST::Node. + * @param rootData The data to pass by reference to \c visitorPre when it is called on each + * root node. * @param visitorPre \c visitorPre(node, parentData) will be called at every node, before * visiting its children, and will be passed, by reference, the \c DATA object returned * by the visit to its parent. Likewise, \c visitorPre should return the \c DATA object @@ -147,8 +149,8 @@ void DepthFirstForest(FOREST& forest, DATA& rootData, VISITOR_PRE& visitorPre) { * @param visitorPost \c visitorPost(node, data) will be called at every node, after visiting * its children, and will be passed, by reference, the \c DATA object returned by the * call to \c visitorPre (the \c DATA object may be modified by visiting the children). - * @param rootData The data to pass by reference to \c visitorPre when it is called on each - * root node. */ + * @param problemSizeThreshold + */ template void DepthFirstForestParallel(FOREST& forest, DATA& rootData, diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index d9cfc1f3c..e227d7162 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -89,7 +89,7 @@ class GTSAM_EXPORT Base { * functions. It would be better for this function to accept the vector and * internally call the norm if necessary. * - * This returns \rho(x) in \ref mEstimator + * This returns \f$\rho(x)\f$ in \ref mEstimator */ virtual double loss(double distance) const { return 0; } @@ -141,9 +141,9 @@ class GTSAM_EXPORT Base { * * This model has no additional parameters. 
* - * - Loss \rho(x) = 0.5 x² - * - Derivative \phi(x) = x - * - Weight w(x) = \phi(x)/x = 1 + * - Loss \f$ \rho(x) = 0.5 x² \f$ + * - Derivative \f$ \phi(x) = x \f$ + * - Weight \f$ w(x) = \phi(x)/x = 1 \f$ */ class GTSAM_EXPORT Null : public Base { public: @@ -275,9 +275,9 @@ class GTSAM_EXPORT Cauchy : public Base { * * This model has a scalar parameter "c". * - * - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|k, (k+x) if x<-k - * - Weight w(x) = \phi(x)/x = 0 if |x|k, (k+x)/x if x<-k + * - Loss \f$ \rho(x) = 0 \f$ if |x|k, (k+x) if x<-k + * - Weight \f$ w(x) = \phi(x)/x = 0 \f$ if |x|k, (k+x)/x if x<-k */ class GTSAM_EXPORT L2WithDeadZone : public Base { protected: From 6baa7e102b29ae5009f0b9fb4f63f5c8ae972b8e Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Mon, 27 Mar 2023 17:28:45 -0400 Subject: [PATCH 15/78] Adds jacobians for Pose2 component extraction Adds jacobians to translation() and rotation() for Pose2 to bring it into spec with Pose3's equilivent functions. Also adds tests. --- gtsam/geometry/Pose2.cpp | 15 +++++++++++++++ gtsam/geometry/Pose2.h | 8 ++++---- gtsam/geometry/tests/testPose2.cpp | 27 +++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 0d9f1bc01..4892af60a 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -32,6 +32,21 @@ GTSAM_CONCEPT_POSE_INST(Pose2) static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.)); +/* ************************************************************************* */ +const Point2 &Pose2::translation(OptionalJacobian<2, 3> Hself) const { + if (Hself) { + *Hself = Matrix::Zero(2, 3); + (*Hself).block<2, 2>(0, 0) = rotation().matrix(); + } + return t_; +} + +/* ************************************************************************* */ +const Rot2& Pose2::rotation(OptionalJacobian<1, 3> Hself) const { + if (Hself) *Hself << 0, 0, 1; + return r_; +} + /* ************************************************************************* */ Matrix3 Pose2::matrix() const { Matrix2 R = r_.matrix(); diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index c3f588dcc..fa148b1c5 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -252,16 +252,16 @@ public: inline double theta() const { return r_.theta(); } /// translation - inline const Point2& t() const { return t_; } + inline const Point2& t() const { return translation(); } /// rotation - inline const Rot2& r() const { return r_; } + inline const Rot2& r() const { return rotation(); } /// translation - inline const Point2& translation() const { return t_; } + const Point2& translation(OptionalJacobian<2, 3> Hself={}) const; /// rotation - inline const Rot2& rotation() const { return r_; } + const Rot2& rotation(OptionalJacobian<1, 3> Hself={}) const; //// return transformation matrix GTSAM_EXPORT Matrix3 matrix() const; diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index 8c1bdccc0..b52a6e590 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -474,6 +474,33 @@ TEST( Pose2, compose_matrix ) EXPECT(assert_equal(gM1*_1M2,matrix(gT1.compose(_1T2)))); // RIGHT DOES NOT } + + +/* ************************************************************************* */ +TEST( Pose2, translation ) { + Pose2 pose(3.5, -8.2, 4.2); + + Matrix actualH; + EXPECT(assert_equal((Vector2() << 3.5, -8.2).finished(), pose.translation(actualH), 1e-8)); + + std::function f = [](const Pose2& T) { return T.translation(); }; + 
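  // Compare the analytic Jacobian from translation(actualH) with a numerical derivative of the translation accessor.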
Matrix numericalH = numericalDerivative11(f, pose); + EXPECT(assert_equal(numericalH, actualH, 1e-6)); +} + +/* ************************************************************************* */ +TEST( Pose2, rotation ) { + Pose2 pose(3.5, -8.2, 4.2); + + Matrix actualH(4, 3); + EXPECT(assert_equal(Rot2(4.2), pose.rotation(actualH), 1e-8)); + + std::function f = [](const Pose2& T) { return T.rotation(); }; + Matrix numericalH = numericalDerivative11(f, pose); + EXPECT(assert_equal(numericalH, actualH, 1e-6)); +} + + /* ************************************************************************* */ TEST( Pose2, between ) { From d6a24847f182e76f9140f8e0365fee3de8410c2f Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Mon, 27 Mar 2023 18:05:18 -0400 Subject: [PATCH 16/78] Extends python iface Pose2 component jacobians --- gtsam/geometry/geometry.i | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index e9929227a..4ea322fa7 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -424,7 +424,9 @@ class Pose2 { gtsam::Rot2 bearing(const gtsam::Point2& point) const; double range(const gtsam::Point2& point) const; gtsam::Point2 translation() const; + gtsam::Point2 translation(Eigen::Ref Hself) const; gtsam::Rot2 rotation() const; + gtsam::Rot2 rotation(Eigen::Ref Hself) const; Matrix matrix() const; // enabling serialization functionality From 7e30cc7c9d4ec295ea0a08171ad238681b52ce08 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 9 Apr 2023 11:38:40 -0400 Subject: [PATCH 17/78] Convert Pose2 r/t accessors to inline --- gtsam/geometry/Pose2.cpp | 15 --------------- gtsam/geometry/Pose2.h | 13 +++++++++++-- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 4892af60a..0d9f1bc01 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -32,21 +32,6 @@ GTSAM_CONCEPT_POSE_INST(Pose2) static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.)); -/* ************************************************************************* */ -const Point2 &Pose2::translation(OptionalJacobian<2, 3> Hself) const { - if (Hself) { - *Hself = Matrix::Zero(2, 3); - (*Hself).block<2, 2>(0, 0) = rotation().matrix(); - } - return t_; -} - -/* ************************************************************************* */ -const Rot2& Pose2::rotation(OptionalJacobian<1, 3> Hself) const { - if (Hself) *Hself << 0, 0, 1; - return r_; -} - /* ************************************************************************* */ Matrix3 Pose2::matrix() const { Matrix2 R = r_.matrix(); diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index fa148b1c5..80418745e 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -258,10 +258,19 @@ public: inline const Rot2& r() const { return rotation(); } /// translation - const Point2& translation(OptionalJacobian<2, 3> Hself={}) const; + inline const Point2& translation(OptionalJacobian<2, 3> Hself={}) const { + if (Hself) { + *Hself = Matrix::Zero(2, 3); + (*Hself).block<2, 2>(0, 0) = rotation().matrix(); + } + return t_; + } /// rotation - const Rot2& rotation(OptionalJacobian<1, 3> Hself={}) const; + inline const Rot2& rotation(OptionalJacobian<1, 3> Hself={}) const { + if (Hself) *Hself << 0, 0, 1; + return r_; + } //// return transformation matrix GTSAM_EXPORT Matrix3 matrix() const; From 2175f804ddb46d9e0731e2b92899e7f226a8f089 Mon Sep 17 00:00:00 2001 From: Dan McGann Date: Sun, 9 Apr 2023 14:18:53 -0400 Subject: [PATCH 18/78] Revert 
changes to Pose2 r(), t() --- gtsam/geometry/Pose2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index 80418745e..f1b38c5a6 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -252,10 +252,10 @@ public: inline double theta() const { return r_.theta(); } /// translation - inline const Point2& t() const { return translation(); } + inline const Point2& t() const { return t_; } /// rotation - inline const Rot2& r() const { return rotation(); } + inline const Rot2& r() const { return r_; } /// translation inline const Point2& translation(OptionalJacobian<2, 3> Hself={}) const { From 3f30ac2aefec137eaee9d3e2d21541172611f751 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Tue, 25 Apr 2023 15:01:57 -0400 Subject: [PATCH 19/78] Add missing option in the CMake for Pose2 --- cmake/HandleGeneralOptions.cmake | 1 + gtsam/config.h.in | 2 ++ gtsam/geometry/Pose2.cpp | 4 ++-- gtsam/geometry/geometry.i | 7 +++++++ gtsam/geometry/tests/testPose2.cpp | 4 ++-- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake index 4a4f1a36e..eab40d16b 100644 --- a/cmake/HandleGeneralOptions.cmake +++ b/cmake/HandleGeneralOptions.cmake @@ -30,6 +30,7 @@ option(GTSAM_ALLOW_DEPRECATED_SINCE_V43 "Allow use of methods/functions depr option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF) +option(GTSAM_SLOW_BUT_CORRECT_EXPMAP "Use slower but correct expmap" ON) if (GTSAM_FORCE_SHARED_LIB) message(STATUS "GTSAM is a shared library due to GTSAM_FORCE_SHARED_LIB") diff --git a/gtsam/config.h.in b/gtsam/config.h.in index 7f8936d1e..ec06d90c9 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -83,3 +83,5 @@ // Toggle switch for BetweenFactor jacobian computation #cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR + +#cmakedefine GTSAM_SLOW_BUT_CORRECT_EXPMAP diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 0d9f1bc01..05678dc27 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -97,7 +97,7 @@ Vector3 Pose2::Logmap(const Pose2& p, OptionalJacobian<3, 3> H) { /* ************************************************************************* */ Pose2 Pose2::ChartAtOrigin::Retract(const Vector3& v, ChartJacobian H) { -#ifdef SLOW_BUT_CORRECT_EXPMAP +#ifdef GTSAM_SLOW_BUT_CORRECT_EXPMAP return Expmap(v, H); #else if (H) { @@ -109,7 +109,7 @@ Pose2 Pose2::ChartAtOrigin::Retract(const Vector3& v, ChartJacobian H) { } /* ************************************************************************* */ Vector3 Pose2::ChartAtOrigin::Local(const Pose2& r, ChartJacobian H) { -#ifdef SLOW_BUT_CORRECT_EXPMAP +#ifdef GTSAM_SLOW_BUT_CORRECT_EXPMAP return Logmap(r, H); #else if (H) { diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index e9929227a..480655cc3 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -159,7 +159,9 @@ class Rot2 { // Manifold gtsam::Rot2 retract(Vector v) const; + gtsam::Rot2 retract(Vector v, Eigen::Ref H1, Eigen::Ref H2) const; Vector localCoordinates(const gtsam::Rot2& p) const; + Vector localCoordinates(const gtsam::Rot2& p, Eigen::Ref H1, Eigen::Ref H2) const; // Lie Group static gtsam::Rot2 Expmap(Vector v); @@ -387,19 +389,24 @@ class Pose2 { static 
gtsam::Pose2 Identity(); gtsam::Pose2 inverse() const; gtsam::Pose2 compose(const gtsam::Pose2& p2) const; + gtsam::Pose2 compose(const gtsam::Pose2& p2, Eigen::Ref H1, Eigen::Ref H2) const; gtsam::Pose2 between(const gtsam::Pose2& p2) const; + gtsam::Pose2 between(const gtsam::Pose2& p2, Eigen::Ref H1, Eigen::Ref H2) const; // Operator Overloads gtsam::Pose2 operator*(const gtsam::Pose2& p2) const; // Manifold gtsam::Pose2 retract(Vector v) const; + gtsam::Pose2 retract(Vector v, Eigen::Ref H1, Eigen::Ref H2) const; Vector localCoordinates(const gtsam::Pose2& p) const; + Vector localCoordinates(const gtsam::Pose2& p, Eigen::Ref H1, Eigen::Ref H2) const; // Lie Group static gtsam::Pose2 Expmap(Vector v); static Vector Logmap(const gtsam::Pose2& p); Vector logmap(const gtsam::Pose2& p); + Vector logmap(const gtsam::Pose2& p, Eigen::Ref H); static Matrix ExpmapDerivative(Vector v); static Matrix LogmapDerivative(const gtsam::Pose2& v); Matrix AdjointMap() const; diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index 8c1bdccc0..56b8a779a 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -66,7 +66,7 @@ TEST(Pose2, manifold) { /* ************************************************************************* */ TEST(Pose2, retract) { Pose2 pose(M_PI/2.0, Point2(1, 2)); -#ifdef SLOW_BUT_CORRECT_EXPMAP +#ifdef GTSAM_SLOW_BUT_CORRECT_EXPMAP Pose2 expected(1.00811, 2.01528, 2.5608); #else Pose2 expected(M_PI/2.0+0.99, Point2(1.015, 2.01)); @@ -204,7 +204,7 @@ TEST(Pose2, Adjoint_hat) { TEST(Pose2, logmap) { Pose2 pose0(M_PI/2.0, Point2(1, 2)); Pose2 pose(M_PI/2.0+0.018, Point2(1.015, 2.01)); -#ifdef SLOW_BUT_CORRECT_EXPMAP +#ifdef GTSAM_SLOW_BUT_CORRECT_EXPMAP Vector3 expected(0.00986473, -0.0150896, 0.018); #else Vector3 expected(0.01, -0.015, 0.018); From b252f64c33a8cf9653784db3bef65a1e13aff59c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 27 Apr 2023 16:32:25 -0400 Subject: [PATCH 20/78] re-enable testSmartStereoProjectionFactorPP --- gtsam_unstable/slam/tests/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/gtsam_unstable/slam/tests/CMakeLists.txt b/gtsam_unstable/slam/tests/CMakeLists.txt index 6872dd575..bb5259ef2 100644 --- a/gtsam_unstable/slam/tests/CMakeLists.txt +++ b/gtsam_unstable/slam/tests/CMakeLists.txt @@ -2,7 +2,6 @@ # Exclude tests that don't work set (slam_excluded_tests testSerialization.cpp - testSmartStereoProjectionFactorPP.cpp # unstable after PR #1442 ) gtsamAddTestsGlob(slam_unstable "test*.cpp" "${slam_excluded_tests}" "gtsam_unstable") From c8c10d3f5d3407b56cc7e5b9d82c46cab0a0a1fb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 30 Apr 2023 15:43:09 -0400 Subject: [PATCH 21/78] install newer version of TBB --- .github/scripts/unix.sh | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/.github/scripts/unix.sh b/.github/scripts/unix.sh index af9ac8991..557255474 100644 --- a/.github/scripts/unix.sh +++ b/.github/scripts/unix.sh @@ -8,33 +8,14 @@ # install TBB with _debug.so files function install_tbb() { - TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.5 - TBB_DIR=tbb44_20160526oss - TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$(uname)" == "Linux" ]; then - OS_SHORT="lin" - TBB_LIB_DIR="intel64/gcc4.4" - SUDO="sudo" + sudo apt-get -y install libtbb-dev elif [ "$(uname)" == "Darwin" ]; then - OS_SHORT="osx" - TBB_LIB_DIR="" - SUDO="" + brew install tbb fi - wget 
"${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH - tar -C /tmp -xf $TBB_SAVEPATH - - TBBROOT=/tmp/$TBB_DIR - # Copy the needed files to the correct places. - # This works correctly for CI builds, instead of setting path variables. - # This is what Homebrew does to install TBB on Macs - $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ - $SUDO cp -R $TBBROOT/include/ /usr/local/include/ - } # common tasks before either build or test From 00c784e5efb0ef829252c49bc2c06ca3bf378d41 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 30 Apr 2023 18:20:57 -0400 Subject: [PATCH 22/78] install_tbb update in python.sh --- .github/scripts/python.sh | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh index 99fddda68..d026aa123 100644 --- a/.github/scripts/python.sh +++ b/.github/scripts/python.sh @@ -9,33 +9,14 @@ set -x -e # install TBB with _debug.so files function install_tbb() { - TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.5 - TBB_DIR=tbb44_20160526oss - TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$(uname)" == "Linux" ]; then - OS_SHORT="lin" - TBB_LIB_DIR="intel64/gcc4.4" - SUDO="sudo" + sudo apt-get -y install libtbb-dev elif [ "$(uname)" == "Darwin" ]; then - OS_SHORT="osx" - TBB_LIB_DIR="" - SUDO="" + brew install tbb fi - wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH - tar -C /tmp -xf $TBB_SAVEPATH - - TBBROOT=/tmp/$TBB_DIR - # Copy the needed files to the correct places. - # This works correctly for CI builds, instead of setting path variables. - # This is what Homebrew does to install TBB on Macs - $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ - $SUDO cp -R $TBBROOT/include/ /usr/local/include/ - } if [ -z ${PYTHON_VERSION+x} ]; then From dca7a980dc8b2610bc622f81d73155b1e0ca4a68 Mon Sep 17 00:00:00 2001 From: ykim742 Date: Tue, 16 May 2023 12:14:32 -0400 Subject: [PATCH 23/78] Added TableFactor, a discrete factor optimized for sparsity. --- gtsam/discrete/TableFactor.cpp | 566 +++++++++++++++++++++++ gtsam/discrete/TableFactor.h | 333 +++++++++++++ gtsam/discrete/tests/testTableFactor.cpp | 359 ++++++++++++++ 3 files changed, 1258 insertions(+) create mode 100644 gtsam/discrete/TableFactor.cpp create mode 100644 gtsam/discrete/TableFactor.h create mode 100644 gtsam/discrete/tests/testTableFactor.cpp diff --git a/gtsam/discrete/TableFactor.cpp b/gtsam/discrete/TableFactor.cpp new file mode 100644 index 000000000..c852afdc2 --- /dev/null +++ b/gtsam/discrete/TableFactor.cpp @@ -0,0 +1,566 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file TableFactor.cpp + * @brief discrete factor + * @date May 4, 2023 + * @author Yoonwoo Kim + */ + +#include +#include +#include +#include +#include + +#include +#include + +using namespace std; + +namespace gtsam { + + /* ************************************************************************ */ + TableFactor::TableFactor() {} + + /* ************************************************************************ */ + TableFactor::TableFactor(const DiscreteKeys& dkeys, + const TableFactor& potentials) + : DiscreteFactor(dkeys.indices()), + cardinalities_(potentials .cardinalities_) { + sparse_table_ = potentials.sparse_table_; + denominators_ = potentials.denominators_; + sorted_dkeys_ = discreteKeys(); + sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); + } + + /* ************************************************************************ */ + TableFactor::TableFactor(const DiscreteKeys& dkeys, + const Eigen::SparseVector& table) + : DiscreteFactor(dkeys.indices()), sparse_table_(table.size()) { + sparse_table_ = table; + double denom = table.size(); + for (const DiscreteKey& dkey : dkeys) { + cardinalities_.insert(dkey); + denom /= dkey.second; + denominators_.insert(std::pair(dkey.first, denom)); + } + sorted_dkeys_ = discreteKeys(); + sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); + } + + /* ************************************************************************ */ + TableFactor::TableFactor(const SparseDiscreteConditional& c) + : DiscreteFactor(c.keys()), + sparse_table_(c.sparse_table_), + denominators_(c.denominators_) { + cardinalities_ = c.cardinalities_; + sorted_dkeys_ = discreteKeys(); + sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); + } + + /* ************************************************************************ */ + Eigen::SparseVector TableFactor::Convert( + const std::vector& table) { + Eigen::SparseVector sparse_table(table.size()); + // Count number of nonzero elements in table and reserving the space. + const uint64_t nnz = std::count_if(table.begin(), table.end(), + [](uint64_t i) { return i != 0; }); + sparse_table.reserve(nnz); + for (uint64_t i = 0; i < table.size(); i++) { + if (table[i] != 0) sparse_table.insert(i) = table[i]; + } + sparse_table.pruned(); + sparse_table.data().squeeze(); + return sparse_table; + } + + /* ************************************************************************ */ + Eigen::SparseVector TableFactor::Convert(const std::string& table) { + // Convert string to doubles. 
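+    // The string is a whitespace-separated list of values, e.g. "2 5 3 6",
+    // following the same convention as the DecisionTreeFactor string constructor.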
+ std::vector ys; + std::istringstream iss(table); + std::copy(std::istream_iterator(iss), std::istream_iterator(), + std::back_inserter(ys)); + return Convert(ys); + } + + /* ************************************************************************ */ + bool TableFactor::equals(const DiscreteFactor& other, + double tol) const { + if (!dynamic_cast(&other)) { + return false; + } else { + const auto& f(static_cast(other)); + return sparse_table_.isApprox(f.sparse_table_, tol); + } + } + + /* ************************************************************************ */ + double TableFactor::operator()(const DiscreteValues& values) const { + // a b c d => D * (C * (B * (a) + b) + c) + d + uint64_t idx = 0, card = 1; + for (auto it = sorted_dkeys_.rbegin(); it != sorted_dkeys_.rend(); ++it) { + if (values.find(it->first) != values.end()) { + idx += card * values.at(it->first); + } + card *= it->second; + } + return sparse_table_.coeff(idx); + + } + + /* ************************************************************************ */ + double TableFactor::findValue(const DiscreteValues& values) const { + // a b c d => D * (C * (B * (a) + b) + c) + d + uint64_t idx = 0, card = 1; + for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { + if (values.find(*it) != values.end()) { + idx += card * values.at(*it); + } + card *= cardinality(*it); + } + return sparse_table_.coeff(idx); + } + + /* ************************************************************************ */ + double TableFactor::error(const DiscreteValues& values) const { + return -log(evaluate(values)); + } + + /* ************************************************************************ */ + double TableFactor::error(const HybridValues& values) const { + return error(values.discrete()); + } + + /* ************************************************************************ */ + DecisionTreeFactor TableFactor::operator*(const DecisionTreeFactor& f) const { + return toDecisionTreeFactor() * f; + } + + /* ************************************************************************ */ + DecisionTreeFactor TableFactor::toDecisionTreeFactor() const { + DiscreteKeys dkeys = discreteKeys(); + std::vector table; + for (auto i = 0; i < sparse_table_.size(); i++) { + table.push_back(sparse_table_.coeff(i)); + } + DecisionTreeFactor f(dkeys, table); + return f; + } + + /* ************************************************************************ */ + TableFactor TableFactor::choose(const DiscreteValues parent_assign, + DiscreteKeys parent_keys) const { + if (parent_keys.empty()) return *this; + + // Unique representation of parent values. + uint64_t unique = 0; + uint64_t card = 1; + for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { + if (parent_assign.find(*it) != parent_assign.end()) { + unique += parent_assign.at(*it) * card; + card *= cardinality(*it); + } + } + + // Find child DiscreteKeys + DiscreteKeys child_dkeys; + std::sort(parent_keys.begin(), parent_keys.end()); + std::set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), parent_keys.begin(), + parent_keys.end(), std::back_inserter(child_dkeys)); + + // Create child sparse table to populate. + uint64_t child_card = 1; + for (const DiscreteKey& child_dkey : child_dkeys) + child_card *= child_dkey.second; + Eigen::SparseVector child_sparse_table_(child_card); + child_sparse_table_.reserve(child_card); + + // Populate child sparse table. 
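+  // Keep only the nonzero entries whose parent assignment matches `unique`,
+  // re-indexed by the remaining (child) keys.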
+ for (SparseIt it(sparse_table_); it; ++it) { + // Create unique representation of parent keys + uint64_t parent_unique = uniqueRep(parent_keys, it.index()); + // Populate the table + if (parent_unique == unique) { + uint64_t idx = uniqueRep(child_dkeys, it.index()); + child_sparse_table_.insert(idx) = it.value(); + } + } + + child_sparse_table_.pruned(); + child_sparse_table_.data().squeeze(); + return TableFactor(child_dkeys, child_sparse_table_); + } + + /* ************************************************************************ */ + double TableFactor::safe_div(const double& a, const double& b) { + // The use for safe_div is when we divide the product factor by the sum + // factor. If the product or sum is zero, we accord zero probability to the + // event. + return (a == 0 || b == 0) ? 0 : (a / b); + } + + /* ************************************************************************ */ + void TableFactor::print(const string& s, const KeyFormatter& formatter) const { + cout << s; + cout << " f["; + for (auto&& key : keys()) + cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key); + cout << " ]" << endl; + for (SparseIt it(sparse_table_); it; ++it) { + DiscreteValues assignment = findAssignments(it.index()); + for (auto&& kv : assignment) { + cout << "(" << formatter(kv.first) << ", " << kv.second << ")"; + } + cout << " | " << it.value() << " | " << it.index() << endl; + } + cout << "number of nnzs: " < map_f = + f.createMap(contract_dkeys, f_free_dkeys); + // 3. Initialize multiplied factor. + uint64_t card = 1; + for (auto u_dkey : union_dkeys) card *= u_dkey.second; + Eigen::SparseVector mult_sparse_table(card); + mult_sparse_table.reserve(card); + // 3. Multiply. + for (SparseIt it(sparse_table_); it; ++it) { + uint64_t contract_unique = uniqueRep(contract_dkeys, it.index()); + if (map_f.find(contract_unique) == map_f.end()) continue; + for (auto assignVal : map_f[contract_unique]) { + uint64_t union_idx = unionRep(union_dkeys, assignVal.first, it.index()); + mult_sparse_table.insert(union_idx) = op(it.value(), assignVal.second); + } + } + // 4. Free unused memory. + mult_sparse_table.pruned(); + mult_sparse_table.data().squeeze(); + // 5. Create union keys and return. + return TableFactor(union_dkeys, mult_sparse_table); + } + + /* ************************************************************************ */ + DiscreteKeys TableFactor::contractDkeys(const TableFactor& f) const { + // Find contract modes. + DiscreteKeys contract; + set_intersection(sorted_dkeys_.begin(), sorted_dkeys_.end(), + f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), + back_inserter(contract)); + return contract; + } + + /* ************************************************************************ */ + DiscreteKeys TableFactor::freeDkeys(const TableFactor& f) const { + // Find free modes. + DiscreteKeys free; + set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), + f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), + back_inserter(free)); + return free; + } + + /* ************************************************************************ */ + DiscreteKeys TableFactor::unionDkeys(const TableFactor& f) const { + // Find union modes. 
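+  // The union of the two sorted key lists is what indexes the product factor
+  // built in apply().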
+ DiscreteKeys union_dkeys; + set_union(sorted_dkeys_.begin(), sorted_dkeys_.end(), + f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), + back_inserter(union_dkeys)); + return union_dkeys; + } + + /* ************************************************************************ */ + uint64_t TableFactor::unionRep(const DiscreteKeys& union_keys, + const DiscreteValues& f_free, const uint64_t idx) const { + uint64_t union_idx = 0, card = 1; + for (auto it = union_keys.rbegin(); it != union_keys.rend(); it++) { + if (f_free.find(it->first) == f_free.end()) { + union_idx += keyValueForIndex(it->first, idx) * card; + } else { + union_idx += f_free.at(it->first) * card; + } + card *= it->second; + } + return union_idx; + } + + /* ************************************************************************ */ + unordered_map TableFactor::createMap( + const DiscreteKeys& contract, const DiscreteKeys& free) const { + // 1. Initialize map. + unordered_map map_f; + // 2. Iterate over nonzero elements. + for (SparseIt it(sparse_table_); it; ++it) { + // 3. Create unique representation of contract modes. + uint64_t unique_rep = uniqueRep(contract, it.index()); + // 4. Create assignment for free modes. + DiscreteValues free_assignments; + for (auto& key : free) free_assignments[key.first] + = keyValueForIndex(key.first, it.index()); + // 5. Populate map. + if (map_f.find(unique_rep) == map_f.end()) { + map_f[unique_rep] = {make_pair(free_assignments, it.value())}; + } else { + map_f[unique_rep].push_back(make_pair(free_assignments, it.value())); + } + } + return map_f; + } + + /* ************************************************************************ */ + uint64_t TableFactor::uniqueRep(const DiscreteKeys& dkeys, const uint64_t idx) const { + if (dkeys.empty()) return 0; + uint64_t unique_rep = 0, card = 1; + for (auto it = dkeys.rbegin(); it != dkeys.rend(); it++) { + unique_rep += keyValueForIndex(it->first, idx) * card; + card *= it->second; + } + return unique_rep; + } + + /* ************************************************************************ */ + uint64_t TableFactor::uniqueRep(const DiscreteValues& assignments) const { + if (assignments.empty()) return 0; + uint64_t unique_rep = 0, card = 1; + for (auto it = assignments.rbegin(); it != assignments.rend(); it++) { + unique_rep += it->second * card; + card *= cardinalities_.at(it->first); + } + return unique_rep; + } + + /* ************************************************************************ */ + DiscreteValues TableFactor::findAssignments(const uint64_t idx) const { + DiscreteValues assignment; + for (Key key : keys_) { + assignment[key] = keyValueForIndex(key, idx); + } + return assignment; + } + + /* ************************************************************************ */ + TableFactor::shared_ptr TableFactor::combine( + size_t nrFrontals, Binary op) const { + if (nrFrontals > size()) { + throw invalid_argument( + "TableFactor::combine: invalid number of frontal " + "keys " + + to_string(nrFrontals) + ", nr.keys=" + std::to_string(size())); + } + // Find remaining keys. + DiscreteKeys remain_dkeys; + uint64_t card = 1; + for (auto i = nrFrontals; i < keys_.size(); i++) { + remain_dkeys.push_back(discreteKey(i)); + card *= cardinality(keys_[i]); + } + // Create combined table. + Eigen::SparseVector combined_table(card); + combined_table.reserve(sparse_table_.nonZeros()); + // Populate combined table. 
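+  // Fold every nonzero entry into the slot indexed by the remaining
+  // (non-frontal) keys, e.g. op = Ring::add for sum(), Ring::max for max().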
+ for (SparseIt it(sparse_table_); it; ++it) { + uint64_t idx = uniqueRep(remain_dkeys, it.index()); + double new_val = op(combined_table.coeff(idx), it.value()); + combined_table.coeffRef(idx) = new_val; + } + // Free unused memory. + combined_table.pruned(); + combined_table.data().squeeze(); + return std::make_shared(remain_dkeys, combined_table); + } + + /* ************************************************************************ */ + TableFactor::shared_ptr TableFactor::combine( + const Ordering& frontalKeys, Binary op) const { + if (frontalKeys.size() > size()) { + throw invalid_argument( + "TableFactor::combine: invalid number of frontal " + "keys " + + std::to_string(frontalKeys.size()) + ", nr.keys=" + + std::to_string(size())); + } + // Find remaining keys. + DiscreteKeys remain_dkeys; + uint64_t card = 1; + for (Key key : keys_) { + if (std::find(frontalKeys.begin(), frontalKeys.end(), key) == + frontalKeys.end()) { + remain_dkeys.emplace_back(key, cardinality(key)); + card *= cardinality(key); + } + } + // Create combined table. + Eigen::SparseVector combined_table(card); + combined_table.reserve(sparse_table_.nonZeros()); + // Populate combined table. + for (SparseIt it(sparse_table_); it; ++it) { + uint64_t idx = uniqueRep(remain_dkeys, it.index()); + double new_val = op(combined_table.coeff(idx), it.value()); + combined_table.coeffRef(idx) = new_val; + } + // Free unused memory. + combined_table.pruned(); + combined_table.data().squeeze(); + return std::make_shared(remain_dkeys, combined_table); + } + + /* ************************************************************************ */ + size_t TableFactor::keyValueForIndex(Key target_key, uint64_t index) const { + // http://phrogz.net/lazy-cartesian-product + return (index / denominators_.at(target_key)) % cardinality(target_key); + } + + /* ************************************************************************ */ + std::vector> TableFactor::enumerate() + const { + // Get all possible assignments + std::vector> pairs = discreteKeys(); + // Reverse to make cartesian product output a more natural ordering. + std::vector> rpairs(pairs.rbegin(), pairs.rend()); + const auto assignments = DiscreteValues::CartesianProduct(rpairs); + // Construct unordered_map with values + std::vector> result; + for (const auto& assignment : assignments) { + result.emplace_back(assignment, operator()(assignment)); + } + return result; + } + + /* ************************************************************************ */ + DiscreteKeys TableFactor::discreteKeys() const { + DiscreteKeys result; + for (auto&& key : keys()) { + DiscreteKey dkey(key, cardinality(key)); + if (std::find(result.begin(), result.end(), dkey) == result.end()) { + result.push_back(dkey); + } + } + return result; + } + + // Print out header. + /* ************************************************************************ */ + string TableFactor::markdown(const KeyFormatter& keyFormatter, + const Names& names) const { + stringstream ss; + + // Print out header. + ss << "|"; + for (auto& key : keys()) { + ss << keyFormatter(key) << "|"; + } + ss << "value|\n"; + + // Print out separator with alignment hints. + ss << "|"; + for (size_t j = 0; j < size(); j++) ss << ":-:|"; + ss << ":-:|\n"; + + // Print out all rows. 
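+  // One markdown row per nonzero entry: the value assigned to each key,
+  // followed by the table value itself.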
+ for (SparseIt it(sparse_table_); it; ++it) { + DiscreteValues assignment = findAssignments(it.index()); + ss << "|"; + for (auto& key : keys()) { + size_t index = assignment.at(key); + ss << DiscreteValues::Translate(names, key, index) << "|"; + } + ss << it.value() << "|\n"; + } + return ss.str(); + } + + /* ************************************************************************ */ + string TableFactor::html(const KeyFormatter& keyFormatter, + const Names& names) const { + stringstream ss; + + // Print out preamble. + ss << "
\n\n \n"; + + // Print out header row. + ss << " "; + for (auto& key : keys()) { + ss << ""; + } + ss << "\n"; + + // Finish header and start body. + ss << " \n \n"; + + // Print out all rows. + for (SparseIt it(sparse_table_); it; ++it) { + DiscreteValues assignment = findAssignments(it.index()); + ss << " "; + for (auto& key : keys()) { + size_t index = assignment.at(key); + ss << ""; + } + ss << ""; // value + ss << "\n"; + } + ss << " \n
" << keyFormatter(key) << "value
" << DiscreteValues::Translate(names, key, index) << "" << it.value() << "
\n
"; + return ss.str(); + } + + /* ************************************************************************ */ + TableFactor TableFactor::prune(size_t maxNrAssignments) const { + const size_t N = maxNrAssignments; + + // Get the probabilities in the TableFactor so we can threshold. + vector> probabilities; + + // Store non-zero probabilities along with their indices in a vector. + for (SparseIt it(sparse_table_); it; ++it) { + probabilities.emplace_back(it.index(), it.value()); + } + + // The number of probabilities can be lower than max_leaves. + if (probabilities.size() <= N) return *this; + + // Sort the vector in descending order based on the element values. + sort(probabilities.begin(), probabilities.end(), [] ( + const std::pair& a, + const std::pair& b) { + return a.second > b.second; + }); + + // Keep the largest N probabilities in the vector. + if (probabilities.size() > N) probabilities.resize(N); + + // Create pruned sparse vector. + Eigen::SparseVector pruned_vec(sparse_table_.size()); + pruned_vec.reserve(probabilities.size()); + + // Populate pruned sparse vector. + for (const auto& prob : probabilities) { + pruned_vec.insert(prob.first) = prob.second; + } + + // Create pruned decision tree factor and return. + return TableFactor(this->discreteKeys(), pruned_vec); + } + + /* ************************************************************************ */ +} // namespace gtsam diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h new file mode 100644 index 000000000..1a328eabf --- /dev/null +++ b/gtsam/discrete/TableFactor.h @@ -0,0 +1,333 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file TableFactor.h + * @date May 4, 2023 + * @author Yoonwoo Kim + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace gtsam { + + class SparseDiscreteConditional; + class HybridValues; + + /** + * A discrete probabilistic factor optimized for sparsity. + * + * @ingroup discrete + */ + class GTSAM_EXPORT TableFactor : public DiscreteFactor { + protected: + std::map cardinalities_; + Eigen::SparseVector sparse_table_; + + private: + std::map denominators_; + DiscreteKeys sorted_dkeys_; + + /** + * @brief Finds nth entry in the cartesian product of arrays in O(1) + * Example) + * v0 | v1 | val + * 0 | 0 | 10 + * 0 | 1 | 21 + * 1 | 0 | 32 + * 1 | 1 | 43 + * keyValueForIndex(v1, 2) = 0 + * @param target_key nth entry's key to find out its assigned value + * @param index nth entry in the sparse vector + * @return TableFactor + */ + size_t keyValueForIndex(Key target_key, uint64_t index) const; + + DiscreteKey discreteKey(size_t i) const { + return DiscreteKey(keys_[i], cardinalities_.at(keys_[i])); + } + + /// Convert probability table given as doubles to SparseVector. + static Eigen::SparseVector Convert(const std::vector& table); + + /// Convert probability table given as string to SparseVector. 
+ static Eigen::SparseVector Convert(const std::string& table); + + public: + // typedefs needed to play nice with gtsam + typedef TableFactor This; + typedef DiscreteFactor Base; ///< Typedef to base class + typedef std::shared_ptr shared_ptr; + typedef Eigen::SparseVector::InnerIterator SparseIt; + typedef std::vector> AssignValList; + using Binary = std::function; + + public: + /** The Real ring with addition and multiplication */ + struct Ring { + static inline double zero() { return 0.0; } + static inline double one() { return 1.0; } + static inline double add(const double& a, const double& b) { return a + b; } + static inline double max(const double& a, const double& b) { + return std::max(a, b); + } + static inline double mul(const double& a, const double& b) { return a * b; } + static inline double div(const double& a, const double& b) { + return (a == 0 || b == 0) ? 0 : (a / b); + } + static inline double id(const double& x) { return x; } + }; + + /// @name Standard Constructors + /// @{ + + /** Default constructor for I/O */ + TableFactor(); + + /** Constructor from DiscreteKeys and TableFactor */ + TableFactor(const DiscreteKeys& keys, const TableFactor& potentials); + + /** Constructor from sparse_table */ + TableFactor(const DiscreteKeys& keys, + const Eigen::SparseVector& table); + + /** Constructor from doubles */ + TableFactor(const DiscreteKeys& keys, const std::vector& table) + : TableFactor(keys, Convert(table)) {} + + /** Constructor from string */ + TableFactor(const DiscreteKeys& keys, const std::string& table) + : TableFactor(keys, Convert(table)) {} + + /// Single-key specialization + template + TableFactor(const DiscreteKey& key, SOURCE table) + : TableFactor(DiscreteKeys{key}, table) {} + + /// Single-key specialization, with vector of doubles. + TableFactor(const DiscreteKey& key, const std::vector& row) + : TableFactor(DiscreteKeys{key}, row) {} + + /** Construct from a DiscreteTableConditional type */ + explicit TableFactor(const SparseDiscreteConditional& c); + + /// @} + /// @name Testable + /// @{ + + /// equality + bool equals(const DiscreteFactor& other, double tol = 1e-9) const override; + + // print + void print( + const std::string& s = "TableFactor:\n", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; + + // /// @} + // /// @name Standard Interface + // /// @{ + + /// Calculate probability for given values `x`, + /// is just look up in TableFactor. + double evaluate(const DiscreteValues& values) const { + return operator()(values); + } + + /// Evaluate probability distribution, sugar. + double operator()(const DiscreteValues& values) const override; + + /// Calculate error for DiscreteValues `x`, is -log(probability). 
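+  /// Note that a zero-probability entry therefore has infinite error.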
+ double error(const DiscreteValues& values) const; + + /// multiply two TableFactors + TableFactor operator*(const TableFactor& f) const { + return apply(f, Ring::mul); + }; + + /// multiple with DecisionTreeFactor + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; + + static double safe_div(const double& a, const double& b); + + size_t cardinality(Key j) const { return cardinalities_.at(j); } + + /// divide by factor f (safely) + TableFactor operator/(const TableFactor& f) const { + return apply(f, safe_div); + } + + /// Convert into a decisiontree + DecisionTreeFactor toDecisionTreeFactor() const override; + + /// Generate TableFactor from TableFactor + // TableFactor toTableFactor() const override { return *this; } + + /// Create a TableFactor that is a subset of this TableFactor + TableFactor choose(const DiscreteValues assignments, + DiscreteKeys parent_keys) const; + + /// Create new factor by summing all values with the same separator values + shared_ptr sum(size_t nrFrontals) const { + return combine(nrFrontals, Ring::add); + } + + /// Create new factor by summing all values with the same separator values + shared_ptr sum(const Ordering& keys) const { + return combine(keys, Ring::add); + } + + /// Create new factor by maximizing over all values with the same separator. + shared_ptr max(size_t nrFrontals) const { + return combine(nrFrontals, Ring::max); + } + + /// Create new factor by maximizing over all values with the same separator. + shared_ptr max(const Ordering& keys) const { + return combine(keys, Ring::max); + } + + /// @} + /// @name Advanced Interface + /// @{ + + /** + * Apply binary operator (*this) "op" f + * @param f the second argument for op + * @param op a binary operator that operates on TableFactor + */ + TableFactor apply(const TableFactor& f, Binary op) const; + + /// Return keys in contract mode. + DiscreteKeys contractDkeys(const TableFactor& f) const; + + /// Return keys in free mode. + DiscreteKeys freeDkeys(const TableFactor& f) const; + + /// Return union of DiscreteKeys in two factors. + DiscreteKeys unionDkeys(const TableFactor& f) const; + + /// Create unique representation of union modes. + uint64_t unionRep(const DiscreteKeys& keys, + const DiscreteValues& assign, const uint64_t idx) const; + + /// Create a hash map of input factor with assignment of contract modes as + /// keys and vector of hashed assignment of free modes and value as values. + std::unordered_map createMap( + const DiscreteKeys& contract, const DiscreteKeys& free) const; + + /// Create unique representation + uint64_t uniqueRep(const DiscreteKeys& keys, const uint64_t idx) const; + + /// Create unique representation with DiscreteValues + uint64_t uniqueRep(const DiscreteValues& assignments) const; + + /// Find DiscreteValues for corresponding index. + DiscreteValues findAssignments(const uint64_t idx) const; + + /// Find value for corresponding DiscreteValues. + double findValue(const DiscreteValues& values) const; + + /** + * Combine frontal variables using binary operator "op" + * @param nrFrontals nr. of frontal to combine variables in this factor + * @param op a binary operator that operates on TableFactor + * @return shared pointer to newly created TableFactor + */ + shared_ptr combine(size_t nrFrontals, Binary op) const; + + /** + * Combine frontal variables in an Ordering using binary operator "op" + * @param nrFrontals nr. 
of frontal to combine variables in this factor + * @param op a binary operator that operates on TableFactor + * @return shared pointer to newly created TableFactor + */ + shared_ptr combine(const Ordering& keys, Binary op) const; + + /// Enumerate all values into a map from values to double. + std::vector> enumerate() const; + + /// Return all the discrete keys associated with this factor. + DiscreteKeys discreteKeys() const; + + /** + * @brief Prune the decision tree of discrete variables. + * + * Pruning will set the values to be "pruned" to 0 indicating a 0 + * probability. An assignment is pruned if it is not in the top + * `maxNrAssignments` values. + * + * A violation can occur if there are more + * duplicate values than `maxNrAssignments`. A violation here is the need to + * un-prune the decision tree (e.g. all assignment values are 1.0). We could + * have another case where some subset of duplicates exist (e.g. for a tree + * with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is + * not a violation since the for `maxNrAssignments=5` the top values are (1, + * 0.8). + * + * @param maxNrAssignments The maximum number of assignments to keep. + * @return TableFactor + */ + TableFactor prune(size_t maxNrAssignments) const; + + /// @} + /// @name Wrapper support + /// @{ + + /** + * @brief Render as markdown table + * + * @param keyFormatter GTSAM-style Key formatter. + * @param names optional, category names corresponding to choices. + * @return std::string a markdown string. + */ + std::string markdown(const KeyFormatter& keyFormatter = DefaultKeyFormatter, + const Names& names = {}) const override; + + /** + * @brief Render as html table + * + * @param keyFormatter GTSAM-style Key formatter. + * @param names optional, category names corresponding to choices. + * @return std::string a html string. + */ + std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, + const Names& names = {}) const override; + + /// @} + /// @name HybridValues methods. + /// @{ + + /** + * Calculate error for HybridValues `x`, is -log(probability) + * Simply dispatches to DiscreteValues version. + */ + double error(const HybridValues& values) const override; + + /// @} + }; + +// traits +template <> +struct traits : public Testable {}; +} // namespace gtsam diff --git a/gtsam/discrete/tests/testTableFactor.cpp b/gtsam/discrete/tests/testTableFactor.cpp new file mode 100644 index 000000000..4acde8167 --- /dev/null +++ b/gtsam/discrete/tests/testTableFactor.cpp @@ -0,0 +1,359 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * testTableFactor.cpp + * + * @date Feb 15, 2023 + * @author Yoonwoo Kim + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +vector genArr(double dropout, size_t size) { + random_device rd; + mt19937 g(rd()); + vector dropoutmask(size); // Chance of 0 + + uniform_int_distribution<> dist(1, 9); + auto gen = [&dist, &g]() { return dist(g); }; + generate(dropoutmask.begin(), dropoutmask.end(), gen); + + fill_n(dropoutmask.begin(), dropoutmask.size() * (dropout), 0); + shuffle(dropoutmask.begin(), dropoutmask.end(), g); + + return dropoutmask; +} + +map> + measureTime(DiscreteKeys keys1, DiscreteKeys keys2, size_t size) { + vector dropouts = {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}; + map> + measured_times; + + for (auto dropout : dropouts) { + vector arr1 = genArr(dropout, size); + vector arr2 = genArr(dropout, size); + TableFactor f1(keys1, arr1); + TableFactor f2(keys2, arr2); + DecisionTreeFactor f1_dt(keys1, arr1); + DecisionTreeFactor f2_dt(keys2, arr2); + + // measure time TableFactor + auto tb_start = chrono::high_resolution_clock::now(); + TableFactor actual = f1 * f2; + auto tb_end = chrono::high_resolution_clock::now(); + auto tb_time_diff = chrono::duration_cast(tb_end - tb_start); + + // measure time DT + auto dt_start = chrono::high_resolution_clock::now(); + DecisionTreeFactor actual_dt = f1_dt * f2_dt; + auto dt_end = chrono::high_resolution_clock::now(); + auto dt_time_diff = chrono::duration_cast(dt_end - dt_start); + + bool flag = true; + for (auto assignmentVal : actual_dt.enumerate()) { + flag = actual_dt(assignmentVal.first) != actual(assignmentVal.first); + if (flag) { + std::cout << "something is wrong: " << std::endl; + assignmentVal.first.print(); + std::cout << "dt: " << actual_dt(assignmentVal.first) << std::endl; + std::cout << "tb: " << actual(assignmentVal.first) << std::endl; + break; + } + } + if (flag) break; + measured_times[dropout] = make_pair(tb_time_diff, dt_time_diff); + } + return measured_times; +} + +void printTime(map> measured_time) { + for (auto&& kv : measured_time) { + cout << "dropout: " << kv.first << " | TableFactor time: " + << kv.second.first.count() << " | DecisionTreeFactor time: " << kv.second.second.count() + << endl; + } + +} + +/* ************************************************************************* */ +TEST( TableFactor, constructors) +{ + // Declare a bunch of keys + DiscreteKey X(0,2), Y(1,3), Z(2,2), A(3, 5); + + // Create factors + TableFactor f_zeros(A, {0, 0, 0, 0, 1}); + TableFactor f1(X, {2, 8}); + TableFactor f2(X & Y, "2 5 3 6 4 7"); + TableFactor f3(X & Y & Z, "2 5 3 6 4 7 25 55 35 65 45 75"); + EXPECT_LONGS_EQUAL(1,f1.size()); + EXPECT_LONGS_EQUAL(2,f2.size()); + EXPECT_LONGS_EQUAL(3,f3.size()); + + DiscreteValues values; + values[0] = 1; // x + values[1] = 2; // y + values[2] = 1; // z + values[3] = 4; // a + EXPECT_DOUBLES_EQUAL(1, f_zeros(values), 1e-9); + EXPECT_DOUBLES_EQUAL(8, f1(values), 1e-9); + EXPECT_DOUBLES_EQUAL(7, f2(values), 1e-9); + EXPECT_DOUBLES_EQUAL(75, f3(values), 1e-9); + + // Assert that error = -log(value) + EXPECT_DOUBLES_EQUAL(-log(f1(values)), f1.error(values), 1e-9); +} + +/* ************************************************************************* */ +TEST(TableFactor, multiplication) { + DiscreteKey v0(0, 2), v1(1, 2), 
v2(2, 2); + + // Multiply with a DiscreteDistribution, i.e., Bayes Law! + DiscreteDistribution prior(v1 % "1/3"); + TableFactor f1(v0 & v1, "1 2 3 4"); + DecisionTreeFactor expected(v0 & v1, "0.25 1.5 0.75 3"); + CHECK(assert_equal(expected, static_cast(prior) * + f1.toDecisionTreeFactor())); + CHECK(assert_equal(expected, f1 * prior)); + + // Multiply two factors + TableFactor f2(v1 & v2, "5 6 7 8"); + TableFactor actual = f1 * f2; + TableFactor expected2(v0 & v1 & v2, "5 6 14 16 15 18 28 32"); + CHECK(assert_equal(expected2, actual)); + + DiscreteKey A(0, 3), B(1, 2), C(2, 2); + TableFactor f_zeros1(A & C, "0 0 0 2 0 3"); + TableFactor f_zeros2(B & C, "4 0 0 5"); + TableFactor actual_zeros = f_zeros1 * f_zeros2; + TableFactor expected3(A & B & C, "0 0 0 0 0 0 0 10 0 0 0 15"); + CHECK(assert_equal(expected3, actual_zeros)); + +} + +/* ************************************************************************* */ +TEST(TableFactor, benchmark) { +DiscreteKey A(0, 5), B(1, 2), C(2, 5), D(3, 2), E(4, 5), + F(5, 2), G(6, 3), H(7, 2), I(8, 5), J(9, 7), K(10, 2), L(11, 3); + + // 100 + DiscreteKeys one_1 = {A, B, C, D}; + DiscreteKeys one_2 = {C, D, E, F}; + map> time_map_1 = + measureTime(one_1, one_2, 100); + printTime(time_map_1); + // 200 + DiscreteKeys two_1 = {A, B, C, D, F}; + DiscreteKeys two_2 = {B, C, D, E, F}; + map> time_map_2 = + measureTime(two_1, two_2, 200); + printTime(time_map_2); + // 300 + DiscreteKeys three_1 = {A, B, C, D, G}; + DiscreteKeys three_2 = {C, D, E, F, G}; + map> time_map_3 = + measureTime(three_1, three_2, 300); + printTime(time_map_3); + // 400 + DiscreteKeys four_1 = {A, B, C, D, F, H}; + DiscreteKeys four_2 = {B, C, D, E, F, H}; + map> time_map_4 = + measureTime(four_1, four_2, 400); + printTime(time_map_4); + // 500 + DiscreteKeys five_1 = {A, B, C, D, I}; + DiscreteKeys five_2 = {C, D, E, F, I}; + map> time_map_5 = + measureTime(five_1, five_2, 500); + printTime(time_map_5); + // 600 + DiscreteKeys six_1 = {A, B, C, D, F, G}; + DiscreteKeys six_2 = {B, C, D, E, F, G}; + map> time_map_6 = + measureTime(six_1, six_2, 600); + printTime(time_map_6); + // 700 + DiscreteKeys seven_1 = {A, B, C, D, J}; + DiscreteKeys seven_2 = {C, D, E, F, J}; + map> time_map_7 = + measureTime(seven_1, seven_2, 700); + printTime(time_map_7); + // 800 + DiscreteKeys eight_1 = {A, B, C, D, F, H, K}; + DiscreteKeys eight_2 = {B, C, D, E, F, H, K}; + map> time_map_8 = + measureTime(eight_1, eight_2, 800); + printTime(time_map_8); + // 900 + DiscreteKeys nine_1 = {A, B, C, D, G, L}; + DiscreteKeys nine_2 = {C, D, E, F, G, L}; + map> time_map_9 = + measureTime(nine_1, nine_2, 900); + printTime(time_map_9); +} + +/* ************************************************************************* */ +TEST( TableFactor, sum_max) +{ + DiscreteKey v0(0,3), v1(1,2); + TableFactor f1(v0 & v1, "1 2 3 4 5 6"); + + TableFactor expected(v1, "9 12"); + TableFactor::shared_ptr actual = f1.sum(1); + CHECK(assert_equal(expected, *actual, 1e-5)); + + TableFactor expected2(v1, "5 6"); + TableFactor::shared_ptr actual2 = f1.max(1); + CHECK(assert_equal(expected2, *actual2)); + + TableFactor f2(v1 & v0, "1 2 3 4 5 6"); + TableFactor::shared_ptr actual22 = f2.sum(1); +} + +/* ************************************************************************* */ +// Check enumerate yields the correct list of assignment/value pairs. 
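+// The expected list is built with the first key (A) varying slowest, which is
+// the cartesian-product ordering produced by TableFactor::enumerate().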
+TEST(TableFactor, enumerate) { + DiscreteKey A(12, 3), B(5, 2); + TableFactor f(A & B, "1 2 3 4 5 6"); + auto actual = f.enumerate(); + std::vector> expected; + DiscreteValues values; + for (size_t a : {0, 1, 2}) { + for (size_t b : {0, 1}) { + values[12] = a; + values[5] = b; + expected.emplace_back(values, f(values)); + } + } + EXPECT(actual == expected); +} + +/* ************************************************************************* */ +// Check pruning of the decision tree works as expected. +TEST(TableFactor, Prune) { + DiscreteKey A(1, 2), B(2, 2), C(3, 2); + TableFactor f(A & B & C, "1 5 3 7 2 6 4 8"); + + // Only keep the leaves with the top 5 values. + size_t maxNrAssignments = 5; + auto pruned5 = f.prune(maxNrAssignments); + + // Pruned leaves should be 0 + TableFactor expected(A & B & C, "0 5 0 7 0 6 4 8"); + EXPECT(assert_equal(expected, pruned5)); + + // Check for more extreme pruning where we only keep the top 2 leaves + maxNrAssignments = 2; + auto pruned2 = f.prune(maxNrAssignments); + TableFactor expected2(A & B & C, "0 0 0 7 0 0 0 8"); + EXPECT(assert_equal(expected2, pruned2)); + + DiscreteKey D(4, 2); + TableFactor factor( + D & C & B & A, + "0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 " + "0.0 0.0 0.99995287 1.0 1.0 1.0 1.0"); + + TableFactor expected3( + D & C & B & A, + "0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 " + "0.999952870000 1.0 1.0 1.0 1.0"); + maxNrAssignments = 5; + auto pruned3 = factor.prune(maxNrAssignments); + EXPECT(assert_equal(expected3, pruned3)); +} + +/* ************************************************************************* */ +// Check markdown representation looks as expected. +TEST(TableFactor, markdown) { + DiscreteKey A(12, 3), B(5, 2); + TableFactor f(A & B, "1 2 3 4 5 6"); + string expected = + "|A|B|value|\n" + "|:-:|:-:|:-:|\n" + "|0|0|1|\n" + "|0|1|2|\n" + "|1|0|3|\n" + "|1|1|4|\n" + "|2|0|5|\n" + "|2|1|6|\n"; + auto formatter = [](Key key) { return key == 12 ? "A" : "B"; }; + string actual = f.markdown(formatter); + EXPECT(actual == expected); +} + +/* ************************************************************************* */ +// Check markdown representation with a value formatter. +TEST(TableFactor, markdownWithValueFormatter) { + DiscreteKey A(12, 3), B(5, 2); + TableFactor f(A & B, "1 2 3 4 5 6"); + string expected = + "|A|B|value|\n" + "|:-:|:-:|:-:|\n" + "|Zero|-|1|\n" + "|Zero|+|2|\n" + "|One|-|3|\n" + "|One|+|4|\n" + "|Two|-|5|\n" + "|Two|+|6|\n"; + auto keyFormatter = [](Key key) { return key == 12 ? "A" : "B"; }; + TableFactor::Names names{{12, {"Zero", "One", "Two"}}, + {5, {"-", "+"}}}; + string actual = f.markdown(keyFormatter, names); + EXPECT(actual == expected); +} + +/* ************************************************************************* */ +// Check html representation with a value formatter. +TEST(TableFactor, htmlWithValueFormatter) { + DiscreteKey A(12, 3), B(5, 2); + TableFactor f(A & B, "1 2 3 4 5 6"); + string expected = + "
\n" + "\n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + "
ABvalue
Zero-1
Zero+2
One-3
One+4
Two-5
Two+6
\n" + "
"; + auto keyFormatter = [](Key key) { return key == 12 ? "A" : "B"; }; + TableFactor::Names names{{12, {"Zero", "One", "Two"}}, + {5, {"-", "+"}}}; + string actual = f.html(keyFormatter, names); + EXPECT(actual == expected); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ From 6976cd6ea22b47fc9ac1db6ce125816608a8de6a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 22 May 2023 15:25:54 -0400 Subject: [PATCH 24/78] Squashed 'wrap/' changes from 076a5e3a9..520dbca0f 520dbca0f Merge pull request #158 from borglab/matlab-enum-support 661daf0dd fix python version specification 6f9111ddb fix python install 691e47734 update CI to newer OS versions 579539b1c finish wrapping 474aece68 fix issue in _collector_return 660c21bcc wrap enum types in cpp 1fa5c2756 begin updating generated cpp for enums 7b156a3f5 add wrap_enum and unwrap_enum helper functions 2a5423061 finish wrapping every part of enum.i 68cfa8a51 wrap enums inside classes ce734fa9f wrap enums declared on their own 66c84e5cb unit test for enum wrapping in matlab 1cc126669 module docstring for matlab_wrapper/templates.py git-subtree-dir: wrap git-subtree-split: 520dbca0f2c3db4d30f0a0fd020a729cc0caa7b7 --- .github/workflows/linux-ci.yml | 6 +- .github/workflows/macos-ci.yml | 4 +- gtwrap/matlab_wrapper/mixins.py | 5 + gtwrap/matlab_wrapper/templates.py | 2 + gtwrap/matlab_wrapper/wrapper.py | 148 ++++++++-- matlab.h | 36 ++- tests/expected/matlab/+Pet/Kind.m | 6 + tests/expected/matlab/+gtsam/+MCU/Avengers.m | 9 + tests/expected/matlab/+gtsam/+MCU/GotG.m | 9 + .../+OptimizerGaussNewtonParams/Verbosity.m | 7 + tests/expected/matlab/+gtsam/VerbosityLM.m | 12 + tests/expected/matlab/Color.m | 7 + tests/expected/matlab/enum_wrapper.cpp | 266 ++++++++++++++++++ .../expected/matlab/special_cases_wrapper.cpp | 4 +- tests/test_matlab_wrapper.py | 26 ++ 15 files changed, 510 insertions(+), 37 deletions(-) create mode 100644 tests/expected/matlab/+Pet/Kind.m create mode 100644 tests/expected/matlab/+gtsam/+MCU/Avengers.m create mode 100644 tests/expected/matlab/+gtsam/+MCU/GotG.m create mode 100644 tests/expected/matlab/+gtsam/+OptimizerGaussNewtonParams/Verbosity.m create mode 100644 tests/expected/matlab/+gtsam/VerbosityLM.m create mode 100644 tests/expected/matlab/Color.m create mode 100644 tests/expected/matlab/enum_wrapper.cpp diff --git a/.github/workflows/linux-ci.yml b/.github/workflows/linux-ci.yml index 34623385e..6c7ef1285 100644 --- a/.github/workflows/linux-ci.yml +++ b/.github/workflows/linux-ci.yml @@ -5,12 +5,12 @@ on: [pull_request] jobs: build: name: Tests for 🐍 ${{ matrix.python-version }} - runs-on: ubuntu-18.04 + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - name: Checkout @@ -19,7 +19,7 @@ jobs: - name: Install Dependencies run: | sudo apt-get -y update - sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + sudo apt install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 diff --git a/.github/workflows/macos-ci.yml b/.github/workflows/macos-ci.yml index 8119a3acb..adba486c5 100644 --- a/.github/workflows/macos-ci.yml +++ b/.github/workflows/macos-ci.yml @@ -5,12 +5,12 @@ on: 
[pull_request] jobs: build: name: Tests for 🐍 ${{ matrix.python-version }} - runs-on: macos-10.15 + runs-on: macos-12 strategy: fail-fast: false matrix: - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - name: Checkout diff --git a/gtwrap/matlab_wrapper/mixins.py b/gtwrap/matlab_wrapper/mixins.py index 4c2b005b7..ed5c5dbc6 100644 --- a/gtwrap/matlab_wrapper/mixins.py +++ b/gtwrap/matlab_wrapper/mixins.py @@ -60,6 +60,11 @@ class CheckMixin: arg_type.typename.name not in self.not_ptr_type and \ arg_type.is_ref + def is_class_enum(self, arg_type: parser.Type, class_: parser.Class): + """Check if `arg_type` is an enum in the class `class_`.""" + enums = (enum.name for enum in class_.enums) + return arg_type.ctype.typename.name in enums + class FormatMixin: """Mixin to provide formatting utilities.""" diff --git a/gtwrap/matlab_wrapper/templates.py b/gtwrap/matlab_wrapper/templates.py index 7783c8e9c..c1c7e75ce 100644 --- a/gtwrap/matlab_wrapper/templates.py +++ b/gtwrap/matlab_wrapper/templates.py @@ -1,3 +1,5 @@ +"""Code generation templates for the Matlab wrapper.""" + import textwrap diff --git a/gtwrap/matlab_wrapper/wrapper.py b/gtwrap/matlab_wrapper/wrapper.py index 0f156a6de..c2a8468c1 100755 --- a/gtwrap/matlab_wrapper/wrapper.py +++ b/gtwrap/matlab_wrapper/wrapper.py @@ -341,11 +341,26 @@ class MatlabWrapper(CheckMixin, FormatMixin): return check_statement - def _unwrap_argument(self, arg, arg_id=0, constructor=False): + def _unwrap_argument(self, + arg, + arg_id=0, + constructor=False, + instantiated_class=None): ctype_camel = self._format_type_name(arg.ctype.typename, separator='') ctype_sep = self._format_type_name(arg.ctype.typename) - if self.is_ref(arg.ctype): # and not constructor: + if instantiated_class and \ + self.is_class_enum(arg, instantiated_class): + + if instantiated_class.original.template: + enum_type = f"{arg.ctype.typename}" + else: + enum_type = f"{instantiated_class.name}::{arg.ctype}" + + arg_type = f"std::shared_ptr<{enum_type}>" + unwrap = f'unwrap_enum<{enum_type}>(in[{arg_id}]);' + + elif self.is_ref(arg.ctype): # and not constructor: arg_type = "{ctype}&".format(ctype=ctype_sep) unwrap = '*unwrap_shared_ptr< {ctype} >(in[{id}], "ptr_{ctype_camel}");'.format( ctype=ctype_sep, ctype_camel=ctype_camel, id=arg_id) @@ -372,7 +387,11 @@ class MatlabWrapper(CheckMixin, FormatMixin): return arg_type, unwrap - def _wrapper_unwrap_arguments(self, args, arg_id=0, constructor=False): + def _wrapper_unwrap_arguments(self, + args, + arg_id=0, + constructor=False, + instantiated_class=None): """Format the interface_parser.Arguments. Examples: @@ -383,7 +402,11 @@ class MatlabWrapper(CheckMixin, FormatMixin): body_args = '' for arg in args.list(): - arg_type, unwrap = self._unwrap_argument(arg, arg_id, constructor) + arg_type, unwrap = self._unwrap_argument( + arg, + arg_id, + constructor, + instantiated_class=instantiated_class) body_args += textwrap.indent(textwrap.dedent('''\ {arg_type} {name} = {unwrap} @@ -535,7 +558,7 @@ class MatlabWrapper(CheckMixin, FormatMixin): def wrap_methods(self, methods, global_funcs=False, global_ns=None): """ - Wrap a sequence of methods. Groups methods with the same names + Wrap a sequence of methods/functions. Groups methods with the same names together. If global_funcs is True then output every method into its own file. 
""" @@ -1027,7 +1050,7 @@ class MatlabWrapper(CheckMixin, FormatMixin): if uninstantiated_name in self.ignore_classes: return None - # Class comment + # Class docstring/comment content_text = self.class_comment(instantiated_class) content_text += self.wrap_methods(instantiated_class.methods) @@ -1108,31 +1131,73 @@ class MatlabWrapper(CheckMixin, FormatMixin): end ''') + # Enums + # Place enums into the correct submodule so we can access them + # e.g. gtsam.Class.Enum.A + for enum in instantiated_class.enums: + enum_text = self.wrap_enum(enum) + if namespace_name != '': + submodule = f"+{namespace_name}/" + else: + submodule = "" + submodule += f"+{instantiated_class.name}" + self.content.append((submodule, [enum_text])) + return file_name + '.m', content_text - def wrap_namespace(self, namespace): + def wrap_enum(self, enum): + """ + Wrap an enum definition. + + Args: + enum: The interface_parser.Enum instance + """ + file_name = enum.name + '.m' + enum_template = textwrap.dedent("""\ + classdef {0} < uint32 + enumeration + {1} + end + end + """) + enumerators = "\n ".join([ + f"{enumerator.name}({idx})" + for idx, enumerator in enumerate(enum.enumerators) + ]) + + content = enum_template.format(enum.name, enumerators) + return file_name, content + + def wrap_namespace(self, namespace, add_mex_file=True): """Wrap a namespace by wrapping all of its components. Args: namespace: the interface_parser.namespace instance of the namespace - parent: parent namespace + add_cpp_file: Flag indicating whether the mex file should be added """ namespaces = namespace.full_namespaces() inner_namespace = namespace.name != '' wrapped = [] - cpp_filename = self._wrapper_name() + '.cpp' - self.content.append((cpp_filename, self.wrapper_file_headers)) - - current_scope = [] - namespace_scope = [] + top_level_scope = [] + inner_namespace_scope = [] for element in namespace.content: if isinstance(element, parser.Include): self.includes.append(element) elif isinstance(element, parser.Namespace): - self.wrap_namespace(element) + self.wrap_namespace(element, False) + + elif isinstance(element, parser.Enum): + file, content = self.wrap_enum(element) + if inner_namespace: + module = "".join([ + '+' + x + '/' for x in namespace.full_namespaces()[1:] + ])[:-1] + inner_namespace_scope.append((module, [(file, content)])) + else: + top_level_scope.append((file, content)) elif isinstance(element, instantiator.InstantiatedClass): self.add_class(element) @@ -1142,18 +1207,22 @@ class MatlabWrapper(CheckMixin, FormatMixin): element, "".join(namespace.full_namespaces())) if not class_text is None: - namespace_scope.append(("".join([ + inner_namespace_scope.append(("".join([ '+' + x + '/' for x in namespace.full_namespaces()[1:] ])[:-1], [(class_text[0], class_text[1])])) else: class_text = self.wrap_instantiated_class(element) - current_scope.append((class_text[0], class_text[1])) + top_level_scope.append((class_text[0], class_text[1])) - self.content.extend(current_scope) + self.content.extend(top_level_scope) if inner_namespace: - self.content.append(namespace_scope) + self.content.append(inner_namespace_scope) + + if add_mex_file: + cpp_filename = self._wrapper_name() + '.cpp' + self.content.append((cpp_filename, self.wrapper_file_headers)) # Global functions all_funcs = [ @@ -1213,10 +1282,22 @@ class MatlabWrapper(CheckMixin, FormatMixin): return return_type_text - def _collector_return(self, obj: str, ctype: parser.Type): + def _collector_return(self, + obj: str, + ctype: parser.Type, + class_property: parser.Variable = 
None, + instantiated_class: InstantiatedClass = None): """Helper method to get the final statement before the return in the collector function.""" expanded = '' - if self.is_shared_ptr(ctype) or self.is_ptr(ctype) or \ + + if class_property and instantiated_class and \ + self.is_class_enum(class_property, instantiated_class): + class_name = ".".join(instantiated_class.namespaces()[1:] + [instantiated_class.name]) + enum_type = f"{class_name}.{ctype.typename.name}" + expanded = textwrap.indent( + f'out[0] = wrap_enum({obj},\"{enum_type}\");', prefix=' ') + + elif self.is_shared_ptr(ctype) or self.is_ptr(ctype) or \ self.can_be_pointer(ctype): sep_method_name = partial(self._format_type_name, ctype.typename, @@ -1316,13 +1397,19 @@ class MatlabWrapper(CheckMixin, FormatMixin): return expanded - def wrap_collector_property_return(self, class_property: parser.Variable): + def wrap_collector_property_return( + self, + class_property: parser.Variable, + instantiated_class: InstantiatedClass = None): """Get the last collector function statement before return for a property.""" property_name = class_property.name obj = 'obj->{}'.format(property_name) - property_type = class_property.ctype - return self._collector_return(obj, property_type) + ctype = class_property.ctype + return self._collector_return(obj, + ctype, + class_property=class_property, + instantiated_class=instantiated_class) def wrap_collector_function_upcast_from_void(self, class_name, func_id, cpp_name): @@ -1381,7 +1468,9 @@ class MatlabWrapper(CheckMixin, FormatMixin): elif collector_func[2] == 'constructor': base = '' params, body_args = self._wrapper_unwrap_arguments( - extra.args, constructor=True) + extra.args, + constructor=True, + instantiated_class=collector_func[1]) if collector_func[1].parent_class: base += textwrap.indent(textwrap.dedent(''' @@ -1442,7 +1531,9 @@ class MatlabWrapper(CheckMixin, FormatMixin): method_name += extra.name _, body_args = self._wrapper_unwrap_arguments( - extra.args, arg_id=1 if is_method else 0) + extra.args, + arg_id=1 if is_method else 0, + instantiated_class=collector_func[1]) return_body = self.wrap_collector_function_return(extra) shared_obj = '' @@ -1472,7 +1563,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): class_name=class_name) # Unpack the property from mxArray - property_type, unwrap = self._unwrap_argument(extra, arg_id=1) + property_type, unwrap = self._unwrap_argument( + extra, arg_id=1, instantiated_class=collector_func[1]) unpack_property = textwrap.indent(textwrap.dedent('''\ {arg_type} {name} = {unwrap} '''.format(arg_type=property_type, @@ -1482,7 +1574,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): # Getter if "_get_" in method_name: - return_body = self.wrap_collector_property_return(extra) + return_body = self.wrap_collector_property_return( + extra, instantiated_class=collector_func[1]) getter = ' checkArguments("{property_name}",nargout,nargin{min1},' \ '{num_args});\n' \ @@ -1837,3 +1930,4 @@ class MatlabWrapper(CheckMixin, FormatMixin): self.generate_content(self.content, path) return self.content + diff --git a/matlab.h b/matlab.h index 7bfa62e50..7be5589dd 100644 --- a/matlab.h +++ b/matlab.h @@ -228,8 +228,22 @@ mxArray* wrap(const gtsam::Matrix& A) { return wrap_Matrix(A); } +template +mxArray* wrap_enum(const T x, const std::string& classname) { + // create double array to store value in + mxArray* a = mxCreateDoubleMatrix(1, 1, mxREAL); + double* data = mxGetPr(a); + data[0] = static_cast(x); + + // convert to Matlab enumeration type + mxArray* result; + 
mexCallMATLAB(1, &result, 1, &a, classname.c_str()); + + return result; +} + //***************************************************************************** -// unwrapping MATLAB arrays into C++ basis types +// unwrapping MATLAB arrays into C++ basic types //***************************************************************************** // default unwrapping throws an error @@ -240,6 +254,22 @@ T unwrap(const mxArray* array) { return T(); } +template +shared_ptr unwrap_enum(const mxArray* array) { + // Make duplicate to remove const-ness + mxArray* a = mxDuplicateArray(array); + std::cout << "unwrap enum type: " << typeid(array).name() << std::endl; + + // convert void* to int32* array + mxArray* a_int32; + mexCallMATLAB(1, &a_int32, 1, &a, "int32"); + + // Get the value in the input array + int32_T* value = (int32_T*)mxGetData(a_int32); + // cast int32 to enum type + return std::make_shared(static_cast(*value)); +} + // specialization to string // expects a character array // Warning: relies on mxChar==char @@ -485,7 +515,7 @@ Class* unwrap_ptr(const mxArray* obj, const string& propertyName) { //template <> //Vector unwrap_shared_ptr(const mxArray* obj, const string& propertyName) { // bool unwrap_shared_ptr_Vector_attempted = false; -// BOOST_STATIC_ASSERT(unwrap_shared_ptr_Vector_attempted, "Vector cannot be unwrapped as a shared pointer"); +// static_assert(unwrap_shared_ptr_Vector_attempted, "Vector cannot be unwrapped as a shared pointer"); // return Vector(); //} @@ -493,7 +523,7 @@ Class* unwrap_ptr(const mxArray* obj, const string& propertyName) { //template <> //Matrix unwrap_shared_ptr(const mxArray* obj, const string& propertyName) { // bool unwrap_shared_ptr_Matrix_attempted = false; -// BOOST_STATIC_ASSERT(unwrap_shared_ptr_Matrix_attempted, "Matrix cannot be unwrapped as a shared pointer"); +// static_assert(unwrap_shared_ptr_Matrix_attempted, "Matrix cannot be unwrapped as a shared pointer"); // return Matrix(); //} diff --git a/tests/expected/matlab/+Pet/Kind.m b/tests/expected/matlab/+Pet/Kind.m new file mode 100644 index 000000000..0d1836feb --- /dev/null +++ b/tests/expected/matlab/+Pet/Kind.m @@ -0,0 +1,6 @@ +classdef Kind < uint32 + enumeration + Dog(0) + Cat(1) + end +end diff --git a/tests/expected/matlab/+gtsam/+MCU/Avengers.m b/tests/expected/matlab/+gtsam/+MCU/Avengers.m new file mode 100644 index 000000000..9daca71f5 --- /dev/null +++ b/tests/expected/matlab/+gtsam/+MCU/Avengers.m @@ -0,0 +1,9 @@ +classdef Avengers < uint32 + enumeration + CaptainAmerica(0) + IronMan(1) + Hulk(2) + Hawkeye(3) + Thor(4) + end +end diff --git a/tests/expected/matlab/+gtsam/+MCU/GotG.m b/tests/expected/matlab/+gtsam/+MCU/GotG.m new file mode 100644 index 000000000..78a80d2cd --- /dev/null +++ b/tests/expected/matlab/+gtsam/+MCU/GotG.m @@ -0,0 +1,9 @@ +classdef GotG < uint32 + enumeration + Starlord(0) + Gamorra(1) + Rocket(2) + Drax(3) + Groot(4) + end +end diff --git a/tests/expected/matlab/+gtsam/+OptimizerGaussNewtonParams/Verbosity.m b/tests/expected/matlab/+gtsam/+OptimizerGaussNewtonParams/Verbosity.m new file mode 100644 index 000000000..7b8264157 --- /dev/null +++ b/tests/expected/matlab/+gtsam/+OptimizerGaussNewtonParams/Verbosity.m @@ -0,0 +1,7 @@ +classdef Verbosity < uint32 + enumeration + SILENT(0) + SUMMARY(1) + VERBOSE(2) + end +end diff --git a/tests/expected/matlab/+gtsam/VerbosityLM.m b/tests/expected/matlab/+gtsam/VerbosityLM.m new file mode 100644 index 000000000..636585543 --- /dev/null +++ b/tests/expected/matlab/+gtsam/VerbosityLM.m @@ -0,0 +1,12 @@ +classdef 
VerbosityLM < uint32 + enumeration + SILENT(0) + SUMMARY(1) + TERMINATION(2) + LAMBDA(3) + TRYLAMBDA(4) + TRYCONFIG(5) + DAMPED(6) + TRYDELTA(7) + end +end diff --git a/tests/expected/matlab/Color.m b/tests/expected/matlab/Color.m new file mode 100644 index 000000000..bd18c4123 --- /dev/null +++ b/tests/expected/matlab/Color.m @@ -0,0 +1,7 @@ +classdef Color < uint32 + enumeration + Red(0) + Green(1) + Blue(2) + end +end diff --git a/tests/expected/matlab/enum_wrapper.cpp b/tests/expected/matlab/enum_wrapper.cpp new file mode 100644 index 000000000..9d041ee77 --- /dev/null +++ b/tests/expected/matlab/enum_wrapper.cpp @@ -0,0 +1,266 @@ +#include +#include + + + +typedef gtsam::Optimizer OptimizerGaussNewtonParams; + +typedef std::set*> Collector_Pet; +static Collector_Pet collector_Pet; +typedef std::set*> Collector_gtsamMCU; +static Collector_gtsamMCU collector_gtsamMCU; +typedef std::set*> Collector_gtsamOptimizerGaussNewtonParams; +static Collector_gtsamOptimizerGaussNewtonParams collector_gtsamOptimizerGaussNewtonParams; + + +void _deleteAllObjects() +{ + mstream mout; + std::streambuf *outbuf = std::cout.rdbuf(&mout); + + bool anyDeleted = false; + { for(Collector_Pet::iterator iter = collector_Pet.begin(); + iter != collector_Pet.end(); ) { + delete *iter; + collector_Pet.erase(iter++); + anyDeleted = true; + } } + { for(Collector_gtsamMCU::iterator iter = collector_gtsamMCU.begin(); + iter != collector_gtsamMCU.end(); ) { + delete *iter; + collector_gtsamMCU.erase(iter++); + anyDeleted = true; + } } + { for(Collector_gtsamOptimizerGaussNewtonParams::iterator iter = collector_gtsamOptimizerGaussNewtonParams.begin(); + iter != collector_gtsamOptimizerGaussNewtonParams.end(); ) { + delete *iter; + collector_gtsamOptimizerGaussNewtonParams.erase(iter++); + anyDeleted = true; + } } + + if(anyDeleted) + cout << + "WARNING: Wrap modules with variables in the workspace have been reloaded due to\n" + "calling destructors, call 'clear all' again if you plan to now recompile a wrap\n" + "module, so that your recompiled module is used instead of the old one." 
<< endl; + std::cout.rdbuf(outbuf); +} + +void _enum_RTTIRegister() { + const mxArray *alreadyCreated = mexGetVariablePtr("global", "gtsam_enum_rttiRegistry_created"); + if(!alreadyCreated) { + std::map types; + + + + mxArray *registry = mexGetVariable("global", "gtsamwrap_rttiRegistry"); + if(!registry) + registry = mxCreateStructMatrix(1, 1, 0, NULL); + typedef std::pair StringPair; + for(const StringPair& rtti_matlab: types) { + int fieldId = mxAddField(registry, rtti_matlab.first.c_str()); + if(fieldId < 0) { + mexErrMsgTxt("gtsam wrap: Error indexing RTTI types, inheritance will not work correctly"); + } + mxArray *matlabName = mxCreateString(rtti_matlab.second.c_str()); + mxSetFieldByNumber(registry, 0, fieldId, matlabName); + } + if(mexPutVariable("global", "gtsamwrap_rttiRegistry", registry) != 0) { + mexErrMsgTxt("gtsam wrap: Error indexing RTTI types, inheritance will not work correctly"); + } + mxDestroyArray(registry); + + mxArray *newAlreadyCreated = mxCreateNumericMatrix(0, 0, mxINT8_CLASS, mxREAL); + if(mexPutVariable("global", "gtsam_enum_rttiRegistry_created", newAlreadyCreated) != 0) { + mexErrMsgTxt("gtsam wrap: Error indexing RTTI types, inheritance will not work correctly"); + } + mxDestroyArray(newAlreadyCreated); + } +} + +void Pet_collectorInsertAndMakeBase_0(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr Shared; + + Shared *self = *reinterpret_cast (mxGetData(in[0])); + collector_Pet.insert(self); +} + +void Pet_constructor_1(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr Shared; + + string& name = *unwrap_shared_ptr< string >(in[0], "ptr_string"); + std::shared_ptr type = unwrap_enum(in[1]); + Shared *self = new Shared(new Pet(name,*type)); + collector_Pet.insert(self); + out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL); + *reinterpret_cast (mxGetData(out[0])) = self; +} + +void Pet_deconstructor_2(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + typedef std::shared_ptr Shared; + checkArguments("delete_Pet",nargout,nargin,1); + Shared *self = *reinterpret_cast(mxGetData(in[0])); + Collector_Pet::iterator item; + item = collector_Pet.find(self); + if(item != collector_Pet.end()) { + collector_Pet.erase(item); + } + delete self; +} + +void Pet_get_name_3(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("name",nargout,nargin-1,0); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + out[0] = wrap< string >(obj->name); +} + +void Pet_set_name_4(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("name",nargout,nargin-1,1); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + string name = unwrap< string >(in[1]); + obj->name = name; +} + +void Pet_get_type_5(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("type",nargout,nargin-1,0); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + out[0] = wrap_enum(obj->type,"Pet.Kind"); +} + +void Pet_set_type_6(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("type",nargout,nargin-1,1); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + std::shared_ptr type = unwrap_enum(in[1]); + obj->type = *type; +} + +void gtsamMCU_collectorInsertAndMakeBase_7(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr Shared; + + Shared *self = *reinterpret_cast 
(mxGetData(in[0])); + collector_gtsamMCU.insert(self); +} + +void gtsamMCU_constructor_8(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr Shared; + + Shared *self = new Shared(new gtsam::MCU()); + collector_gtsamMCU.insert(self); + out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL); + *reinterpret_cast (mxGetData(out[0])) = self; +} + +void gtsamMCU_deconstructor_9(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + typedef std::shared_ptr Shared; + checkArguments("delete_gtsamMCU",nargout,nargin,1); + Shared *self = *reinterpret_cast(mxGetData(in[0])); + Collector_gtsamMCU::iterator item; + item = collector_gtsamMCU.find(self); + if(item != collector_gtsamMCU.end()) { + collector_gtsamMCU.erase(item); + } + delete self; +} + +void gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr> Shared; + + Shared *self = *reinterpret_cast (mxGetData(in[0])); + collector_gtsamOptimizerGaussNewtonParams.insert(self); +} + +void gtsamOptimizerGaussNewtonParams_deconstructor_11(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + typedef std::shared_ptr> Shared; + checkArguments("delete_gtsamOptimizerGaussNewtonParams",nargout,nargin,1); + Shared *self = *reinterpret_cast(mxGetData(in[0])); + Collector_gtsamOptimizerGaussNewtonParams::iterator item; + item = collector_gtsamOptimizerGaussNewtonParams.find(self); + if(item != collector_gtsamOptimizerGaussNewtonParams.end()) { + collector_gtsamOptimizerGaussNewtonParams.erase(item); + } + delete self; +} + +void gtsamOptimizerGaussNewtonParams_setVerbosity_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("setVerbosity",nargout,nargin-1,1); + auto obj = unwrap_shared_ptr>(in[0], "ptr_gtsamOptimizerGaussNewtonParams"); + std::shared_ptr::Verbosity> value = unwrap_enum::Verbosity>(in[1]); + obj->setVerbosity(*value); +} + + +void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mstream mout; + std::streambuf *outbuf = std::cout.rdbuf(&mout); + + _enum_RTTIRegister(); + + int id = unwrap(in[0]); + + try { + switch(id) { + case 0: + Pet_collectorInsertAndMakeBase_0(nargout, out, nargin-1, in+1); + break; + case 1: + Pet_constructor_1(nargout, out, nargin-1, in+1); + break; + case 2: + Pet_deconstructor_2(nargout, out, nargin-1, in+1); + break; + case 3: + Pet_get_name_3(nargout, out, nargin-1, in+1); + break; + case 4: + Pet_set_name_4(nargout, out, nargin-1, in+1); + break; + case 5: + Pet_get_type_5(nargout, out, nargin-1, in+1); + break; + case 6: + Pet_set_type_6(nargout, out, nargin-1, in+1); + break; + case 7: + gtsamMCU_collectorInsertAndMakeBase_7(nargout, out, nargin-1, in+1); + break; + case 8: + gtsamMCU_constructor_8(nargout, out, nargin-1, in+1); + break; + case 9: + gtsamMCU_deconstructor_9(nargout, out, nargin-1, in+1); + break; + case 10: + gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_10(nargout, out, nargin-1, in+1); + break; + case 11: + gtsamOptimizerGaussNewtonParams_deconstructor_11(nargout, out, nargin-1, in+1); + break; + case 12: + gtsamOptimizerGaussNewtonParams_setVerbosity_12(nargout, out, nargin-1, in+1); + break; + } + } catch(const std::exception& e) { + mexErrMsgTxt(("Exception from gtsam:\n" + std::string(e.what()) + "\n").c_str()); + } + + std::cout.rdbuf(outbuf); +} diff --git 
a/tests/expected/matlab/special_cases_wrapper.cpp b/tests/expected/matlab/special_cases_wrapper.cpp index 0669b442e..565368c2c 100644 --- a/tests/expected/matlab/special_cases_wrapper.cpp +++ b/tests/expected/matlab/special_cases_wrapper.cpp @@ -204,14 +204,14 @@ void gtsamGeneralSFMFactorCal3Bundler_get_verbosity_11(int nargout, mxArray *out { checkArguments("verbosity",nargout,nargin-1,0); auto obj = unwrap_shared_ptr, gtsam::Point3>>(in[0], "ptr_gtsamGeneralSFMFactorCal3Bundler"); - out[0] = wrap_shared_ptr(std::make_shared, gtsam::Point3>::Verbosity>(obj->verbosity),"gtsam.GeneralSFMFactor, gtsam::Point3>.Verbosity", false); + out[0] = wrap_enum(obj->verbosity,"gtsam.GeneralSFMFactorCal3Bundler.Verbosity"); } void gtsamGeneralSFMFactorCal3Bundler_set_verbosity_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("verbosity",nargout,nargin-1,1); auto obj = unwrap_shared_ptr, gtsam::Point3>>(in[0], "ptr_gtsamGeneralSFMFactorCal3Bundler"); - std::shared_ptr, gtsam::Point3>::Verbosity> verbosity = unwrap_shared_ptr< gtsam::GeneralSFMFactor, gtsam::Point3>::Verbosity >(in[1], "ptr_gtsamGeneralSFMFactor, gtsam::Point3>Verbosity"); + std::shared_ptr, gtsam::Point3>::Verbosity> verbosity = unwrap_enum, gtsam::Point3>::Verbosity>(in[1]); obj->verbosity = *verbosity; } diff --git a/tests/test_matlab_wrapper.py b/tests/test_matlab_wrapper.py index 17b2dd11d..0ca95b66d 100644 --- a/tests/test_matlab_wrapper.py +++ b/tests/test_matlab_wrapper.py @@ -141,6 +141,32 @@ class TestWrap(unittest.TestCase): actual = osp.join(self.MATLAB_ACTUAL_DIR, file) self.compare_and_diff(file, actual) + def test_enum(self): + """Test interface file with only enum info.""" + file = osp.join(self.INTERFACE_DIR, 'enum.i') + + wrapper = MatlabWrapper( + module_name='enum', + top_module_namespace=['gtsam'], + ignore_classes=[''], + ) + + wrapper.wrap([file], path=self.MATLAB_ACTUAL_DIR) + + files = [ + 'enum_wrapper.cpp', + 'Color.m', + '+Pet/Kind.m', + '+gtsam/VerbosityLM.m', + '+gtsam/+MCU/Avengers.m', + '+gtsam/+MCU/GotG.m', + '+gtsam/+OptimizerGaussNewtonParams/Verbosity.m', + ] + + for file in files: + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) + def test_templates(self): """Test interface file with template info.""" file = osp.join(self.INTERFACE_DIR, 'templates.i') From 80e0d4afe9414f5af23bab247a2df158a7da04b0 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 22 May 2023 16:04:22 -0400 Subject: [PATCH 25/78] matlab tests for enum wrapping --- matlab/gtsam_tests/testEnum.m | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 matlab/gtsam_tests/testEnum.m diff --git a/matlab/gtsam_tests/testEnum.m b/matlab/gtsam_tests/testEnum.m new file mode 100644 index 000000000..8e5e935f6 --- /dev/null +++ b/matlab/gtsam_tests/testEnum.m @@ -0,0 +1,12 @@ +% test Enum +import gtsam.*; + +params = GncLMParams(); + +EXPECT('Get lossType',params.lossType==GncLossType.TLS); + +params.lossType = GncLossType.GM; +EXPECT('Set lossType',params.lossType==GncLossType.GM); + +params.setLossType(GncLossType.TLS); +EXPECT('setLossType',params.lossType==GncLossType.TLS); From 30a39a0bdbb7f4116b0bb30f9ebd8995a78f8ca8 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 24 May 2023 12:12:22 -0400 Subject: [PATCH 26/78] Squashed 'wrap/' changes from 520dbca0f..2f136936d 2f136936d Merge pull request #159 from borglab/fix-matlab-enum d1da38776 fix pybind test 2a00e255b additional enum test and wrapper update to pass test f0076ec18 fixp python enum 
fixture a0c87e0df don't cast enum to shared ptr a6ad343af improve enum wrapping e0a504328 is_enum method in mixin 8d9d380c7 fix bug in fully qualified enum type 0491a8361 update docstrings to reflect update from basis to basic d1fb05c41 improve docs and clean up fdc1a00b8 rename Basis to Basic for basic c++ types 00ee34133 specify full namespace for enum-type arg f86724e30 add docstrings 38fb0e3a3 docs for enum wrapping functions 9d3bd43c0 add test fixtures git-subtree-dir: wrap git-subtree-split: 2f136936dbc33d9c3875952d6f0b29c43b8e26b4 --- gtwrap/interface_parser/tokens.py | 9 +- gtwrap/interface_parser/type.py | 30 +++-- gtwrap/matlab_wrapper/mixins.py | 26 ++++- gtwrap/matlab_wrapper/wrapper.py | 70 ++++++------ matlab.h | 16 ++- tests/expected/matlab/enum_wrapper.cpp | 108 +++++++++++++----- .../expected/matlab/special_cases_wrapper.cpp | 4 +- tests/expected/python/enum_pybind.cpp | 9 +- tests/fixtures/enum.i | 13 ++- tests/test_interface_parser.py | 4 +- 10 files changed, 190 insertions(+), 99 deletions(-) diff --git a/gtwrap/interface_parser/tokens.py b/gtwrap/interface_parser/tokens.py index 0f8d38d86..02e6d82f8 100644 --- a/gtwrap/interface_parser/tokens.py +++ b/gtwrap/interface_parser/tokens.py @@ -10,9 +10,10 @@ All the token definitions. Author: Duy Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal, and Frank Dellaert """ -from pyparsing import (Keyword, Literal, OneOrMore, Or, # type: ignore - QuotedString, Suppress, Word, alphanums, alphas, - nestedExpr, nums, originalTextFor, printables) +from pyparsing import Or # type: ignore +from pyparsing import (Keyword, Literal, OneOrMore, QuotedString, Suppress, + Word, alphanums, alphas, nestedExpr, nums, + originalTextFor, printables) # rule for identifiers (e.g. variable names) IDENT = Word(alphas + '_', alphanums + '_') ^ Word(nums) @@ -52,7 +53,7 @@ CONST, VIRTUAL, CLASS, STATIC, PAIR, TEMPLATE, TYPEDEF, INCLUDE = map( ) ENUM = Keyword("enum") ^ Keyword("enum class") ^ Keyword("enum struct") NAMESPACE = Keyword("namespace") -BASIS_TYPES = map( +BASIC_TYPES = map( Keyword, [ "void", diff --git a/gtwrap/interface_parser/type.py b/gtwrap/interface_parser/type.py index deb2e2256..e56a2f015 100644 --- a/gtwrap/interface_parser/type.py +++ b/gtwrap/interface_parser/type.py @@ -17,15 +17,13 @@ from typing import List, Sequence, Union from pyparsing import ParseResults # type: ignore from pyparsing import Forward, Optional, Or, delimitedList -from .tokens import (BASIS_TYPES, CONST, IDENT, LOPBRACK, RAW_POINTER, REF, +from .tokens import (BASIC_TYPES, CONST, IDENT, LOPBRACK, RAW_POINTER, REF, ROPBRACK, SHARED_POINTER) class Typename: """ - Generic type which can be either a basic type or a class type, - similar to C++'s `typename` aka a qualified dependent type. - Contains type name with full namespace and template arguments. + Class which holds a type's name, full namespace, and template arguments. E.g. ``` @@ -89,7 +87,6 @@ class Typename: def to_cpp(self) -> str: """Generate the C++ code for wrapping.""" - idx = 1 if self.namespaces and not self.namespaces[0] else 0 if self.instantiations: cpp_name = self.name + "<{}>".format(", ".join( [inst.to_cpp() for inst in self.instantiations])) @@ -116,7 +113,7 @@ class BasicType: """ Basic types are the fundamental built-in types in C++ such as double, int, char, etc. - When using templates, the basis type will take on the same form as the template. + When using templates, the basic type will take on the same form as the template. E.g. 
``` @@ -127,16 +124,16 @@ class BasicType: will give ``` - m_.def("CoolFunctionDoubleDouble",[](const double& s) { - return wrap_example::CoolFunction(s); - }, py::arg("s")); + m_.def("funcDouble",[](const double& x){ + ::func(x); + }, py::arg("x")); ``` """ - rule = (Or(BASIS_TYPES)("typename")).setParseAction(lambda t: BasicType(t)) + rule = (Or(BASIC_TYPES)("typename")).setParseAction(lambda t: BasicType(t)) def __init__(self, t: ParseResults): - self.typename = Typename(t.asList()) + self.typename = Typename(t) class CustomType: @@ -160,9 +157,9 @@ class CustomType: class Type: """ - Parsed datatype, can be either a fundamental type or a custom datatype. + Parsed datatype, can be either a fundamental/basic type or a custom datatype. E.g. void, double, size_t, Matrix. - Think of this as a high-level type which encodes the typename and other + Think of this as a high-level type which encodes the typename and other characteristics of the type. The type can optionally be a raw pointer, shared pointer or reference. @@ -170,7 +167,7 @@ class Type: """ rule = ( Optional(CONST("is_const")) # - + (BasicType.rule("basis") | CustomType.rule("qualified")) # BR + + (BasicType.rule("basic") | CustomType.rule("qualified")) # BR + Optional( SHARED_POINTER("is_shared_ptr") | RAW_POINTER("is_ptr") | REF("is_ref")) # @@ -188,9 +185,10 @@ class Type: @staticmethod def from_parse_result(t: ParseResults): """Return the resulting Type from parsing the source.""" - if t.basis: + # If the type is a basic/fundamental c++ type (e.g int, bool) + if t.basic: return Type( - typename=t.basis.typename, + typename=t.basic.typename, is_const=t.is_const, is_shared_ptr=t.is_shared_ptr, is_ptr=t.is_ptr, diff --git a/gtwrap/matlab_wrapper/mixins.py b/gtwrap/matlab_wrapper/mixins.py index ed5c5dbc6..df4de98f3 100644 --- a/gtwrap/matlab_wrapper/mixins.py +++ b/gtwrap/matlab_wrapper/mixins.py @@ -61,9 +61,29 @@ class CheckMixin: arg_type.is_ref def is_class_enum(self, arg_type: parser.Type, class_: parser.Class): - """Check if `arg_type` is an enum in the class `class_`.""" - enums = (enum.name for enum in class_.enums) - return arg_type.ctype.typename.name in enums + """Check if arg_type is an enum in the class `class_`.""" + if class_: + class_enums = [enum.name for enum in class_.enums] + return arg_type.typename.name in class_enums + else: + return False + + def is_global_enum(self, arg_type: parser.Type, class_: parser.Class): + """Check if arg_type is a global enum.""" + if class_: + # Get the enums in the class' namespace + global_enums = [ + member.name for member in class_.parent.content + if isinstance(member, parser.Enum) + ] + return arg_type.typename.name in global_enums + else: + return False + + def is_enum(self, arg_type: parser.Type, class_: parser.Class): + """Check if `arg_type` is an enum.""" + return self.is_class_enum(arg_type, class_) or self.is_global_enum( + arg_type, class_) class FormatMixin: diff --git a/gtwrap/matlab_wrapper/wrapper.py b/gtwrap/matlab_wrapper/wrapper.py index c2a8468c1..146209c44 100755 --- a/gtwrap/matlab_wrapper/wrapper.py +++ b/gtwrap/matlab_wrapper/wrapper.py @@ -341,23 +341,14 @@ class MatlabWrapper(CheckMixin, FormatMixin): return check_statement - def _unwrap_argument(self, - arg, - arg_id=0, - constructor=False, - instantiated_class=None): + def _unwrap_argument(self, arg, arg_id=0, instantiated_class=None): ctype_camel = self._format_type_name(arg.ctype.typename, separator='') ctype_sep = self._format_type_name(arg.ctype.typename) if instantiated_class and \ - 
self.is_class_enum(arg, instantiated_class): - - if instantiated_class.original.template: - enum_type = f"{arg.ctype.typename}" - else: - enum_type = f"{instantiated_class.name}::{arg.ctype}" - - arg_type = f"std::shared_ptr<{enum_type}>" + self.is_enum(arg.ctype, instantiated_class): + enum_type = f"{arg.ctype.typename}" + arg_type = f"{enum_type}" unwrap = f'unwrap_enum<{enum_type}>(in[{arg_id}]);' elif self.is_ref(arg.ctype): # and not constructor: @@ -390,7 +381,6 @@ class MatlabWrapper(CheckMixin, FormatMixin): def _wrapper_unwrap_arguments(self, args, arg_id=0, - constructor=False, instantiated_class=None): """Format the interface_parser.Arguments. @@ -403,10 +393,7 @@ class MatlabWrapper(CheckMixin, FormatMixin): for arg in args.list(): arg_type, unwrap = self._unwrap_argument( - arg, - arg_id, - constructor, - instantiated_class=instantiated_class) + arg, arg_id, instantiated_class=instantiated_class) body_args += textwrap.indent(textwrap.dedent('''\ {arg_type} {name} = {unwrap} @@ -428,7 +415,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): continue if not self.is_ref(arg.ctype) and (self.is_shared_ptr(arg.ctype) or \ - self.is_ptr(arg.ctype) or self.can_be_pointer(arg.ctype))and \ + self.is_ptr(arg.ctype) or self.can_be_pointer(arg.ctype)) and \ + not self.is_enum(arg.ctype, instantiated_class) and \ arg.ctype.typename.name not in self.ignore_namespace: if arg.ctype.is_shared_ptr: call_type = arg.ctype.is_shared_ptr @@ -1147,7 +1135,7 @@ class MatlabWrapper(CheckMixin, FormatMixin): def wrap_enum(self, enum): """ - Wrap an enum definition. + Wrap an enum definition as a Matlab class. Args: enum: The interface_parser.Enum instance @@ -1285,15 +1273,23 @@ class MatlabWrapper(CheckMixin, FormatMixin): def _collector_return(self, obj: str, ctype: parser.Type, - class_property: parser.Variable = None, instantiated_class: InstantiatedClass = None): """Helper method to get the final statement before the return in the collector function.""" expanded = '' - if class_property and instantiated_class and \ - self.is_class_enum(class_property, instantiated_class): - class_name = ".".join(instantiated_class.namespaces()[1:] + [instantiated_class.name]) - enum_type = f"{class_name}.{ctype.typename.name}" + if instantiated_class and \ + self.is_enum(ctype, instantiated_class): + if self.is_class_enum(ctype, instantiated_class): + class_name = ".".join(instantiated_class.namespaces()[1:] + + [instantiated_class.name]) + else: + # Get the full namespace + class_name = ".".join(instantiated_class.parent.full_namespaces()[1:]) + + if class_name != "": + class_name += '.' + + enum_type = f"{class_name}{ctype.typename.name}" expanded = textwrap.indent( f'out[0] = wrap_enum({obj},\"{enum_type}\");', prefix=' ') @@ -1340,13 +1336,14 @@ class MatlabWrapper(CheckMixin, FormatMixin): return expanded - def wrap_collector_function_return(self, method): + def wrap_collector_function_return(self, method, instantiated_class=None): """ Wrap the complete return type of the function. 
""" expanded = '' - params = self._wrapper_unwrap_arguments(method.args, arg_id=1)[0] + params = self._wrapper_unwrap_arguments( + method.args, arg_id=1, instantiated_class=instantiated_class)[0] return_1 = method.return_type.type1 return_count = self._return_count(method.return_type) @@ -1382,7 +1379,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): if return_1_name != 'void': if return_count == 1: - expanded += self._collector_return(obj, return_1) + expanded += self._collector_return( + obj, return_1, instantiated_class=instantiated_class) elif return_count == 2: return_2 = method.return_type.type2 @@ -1405,10 +1403,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): property_name = class_property.name obj = 'obj->{}'.format(property_name) - ctype = class_property.ctype return self._collector_return(obj, - ctype, - class_property=class_property, + class_property.ctype, instantiated_class=instantiated_class) def wrap_collector_function_upcast_from_void(self, class_name, func_id, @@ -1468,9 +1464,7 @@ class MatlabWrapper(CheckMixin, FormatMixin): elif collector_func[2] == 'constructor': base = '' params, body_args = self._wrapper_unwrap_arguments( - extra.args, - constructor=True, - instantiated_class=collector_func[1]) + extra.args, instantiated_class=collector_func[1]) if collector_func[1].parent_class: base += textwrap.indent(textwrap.dedent(''' @@ -1534,7 +1528,9 @@ class MatlabWrapper(CheckMixin, FormatMixin): extra.args, arg_id=1 if is_method else 0, instantiated_class=collector_func[1]) - return_body = self.wrap_collector_function_return(extra) + + return_body = self.wrap_collector_function_return( + extra, collector_func[1]) shared_obj = '' @@ -1591,7 +1587,8 @@ class MatlabWrapper(CheckMixin, FormatMixin): # Setter if "_set_" in method_name: - is_ptr_type = self.can_be_pointer(extra.ctype) + is_ptr_type = self.can_be_pointer(extra.ctype) and \ + not self.is_enum(extra.ctype, collector_func[1]) return_body = ' obj->{0} = {1}{0};'.format( extra.name, '*' if is_ptr_type else '') @@ -1930,4 +1927,3 @@ class MatlabWrapper(CheckMixin, FormatMixin): self.generate_content(self.content, path) return self.content - diff --git a/matlab.h b/matlab.h index 7be5589dd..f44294770 100644 --- a/matlab.h +++ b/matlab.h @@ -118,10 +118,10 @@ void checkArguments(const string& name, int nargout, int nargin, int expected) { } //***************************************************************************** -// wrapping C++ basis types in MATLAB arrays +// wrapping C++ basic types in MATLAB arrays //***************************************************************************** -// default wrapping throws an error: only basis types are allowed in wrap +// default wrapping throws an error: only basic types are allowed in wrap template mxArray* wrap(const Class& value) { error("wrap internal error: attempted wrap of invalid type"); @@ -228,6 +228,10 @@ mxArray* wrap(const gtsam::Matrix& A) { return wrap_Matrix(A); } +/// @brief Wrap the C++ enum to Matlab mxArray +/// @tparam T The C++ enum type +/// @param x C++ enum +/// @param classname Matlab enum classdef used to call Matlab constructor template mxArray* wrap_enum(const T x, const std::string& classname) { // create double array to store value in @@ -254,11 +258,13 @@ T unwrap(const mxArray* array) { return T(); } +/// @brief Unwrap from matlab array to C++ enum type +/// @tparam T The C++ enum type +/// @param array Matlab mxArray template -shared_ptr unwrap_enum(const mxArray* array) { +T unwrap_enum(const mxArray* array) { // Make duplicate to remove 
const-ness mxArray* a = mxDuplicateArray(array); - std::cout << "unwrap enum type: " << typeid(array).name() << std::endl; // convert void* to int32* array mxArray* a_int32; @@ -267,7 +273,7 @@ shared_ptr unwrap_enum(const mxArray* array) { // Get the value in the input array int32_T* value = (int32_T*)mxGetData(a_int32); // cast int32 to enum type - return std::make_shared(static_cast(*value)); + return static_cast(*value); } // specialization to string diff --git a/tests/expected/matlab/enum_wrapper.cpp b/tests/expected/matlab/enum_wrapper.cpp index 9d041ee77..4860f9b8d 100644 --- a/tests/expected/matlab/enum_wrapper.cpp +++ b/tests/expected/matlab/enum_wrapper.cpp @@ -93,8 +93,8 @@ void Pet_constructor_1(int nargout, mxArray *out[], int nargin, const mxArray *i typedef std::shared_ptr Shared; string& name = *unwrap_shared_ptr< string >(in[0], "ptr_string"); - std::shared_ptr type = unwrap_enum(in[1]); - Shared *self = new Shared(new Pet(name,*type)); + Pet::Kind type = unwrap_enum(in[1]); + Shared *self = new Shared(new Pet(name,type)); collector_Pet.insert(self); out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL); *reinterpret_cast (mxGetData(out[0])) = self; @@ -113,14 +113,29 @@ void Pet_deconstructor_2(int nargout, mxArray *out[], int nargin, const mxArray delete self; } -void Pet_get_name_3(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Pet_getColor_3(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("getColor",nargout,nargin-1,0); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + out[0] = wrap_enum(obj->getColor(),"Color"); +} + +void Pet_setColor_4(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("setColor",nargout,nargin-1,1); + auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); + Color color = unwrap_enum(in[1]); + obj->setColor(color); +} + +void Pet_get_name_5(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("name",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); out[0] = wrap< string >(obj->name); } -void Pet_set_name_4(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Pet_set_name_6(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("name",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); @@ -128,22 +143,22 @@ void Pet_set_name_4(int nargout, mxArray *out[], int nargin, const mxArray *in[] obj->name = name; } -void Pet_get_type_5(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Pet_get_type_7(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("type",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); out[0] = wrap_enum(obj->type,"Pet.Kind"); } -void Pet_set_type_6(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Pet_set_type_8(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("type",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Pet"); - std::shared_ptr type = unwrap_enum(in[1]); - obj->type = *type; + Pet::Kind type = unwrap_enum(in[1]); + obj->type = type; } -void gtsamMCU_collectorInsertAndMakeBase_7(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamMCU_collectorInsertAndMakeBase_9(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef std::shared_ptr Shared; @@ -152,7 +167,7 @@ void gtsamMCU_collectorInsertAndMakeBase_7(int nargout, mxArray *out[], int narg 
collector_gtsamMCU.insert(self); } -void gtsamMCU_constructor_8(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamMCU_constructor_10(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef std::shared_ptr Shared; @@ -163,7 +178,7 @@ void gtsamMCU_constructor_8(int nargout, mxArray *out[], int nargin, const mxArr *reinterpret_cast (mxGetData(out[0])) = self; } -void gtsamMCU_deconstructor_9(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamMCU_deconstructor_11(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef std::shared_ptr Shared; checkArguments("delete_gtsamMCU",nargout,nargin,1); @@ -176,7 +191,7 @@ void gtsamMCU_deconstructor_9(int nargout, mxArray *out[], int nargin, const mxA delete self; } -void gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef std::shared_ptr> Shared; @@ -185,7 +200,19 @@ void gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_10(int nargout, collector_gtsamOptimizerGaussNewtonParams.insert(self); } -void gtsamOptimizerGaussNewtonParams_deconstructor_11(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamOptimizerGaussNewtonParams_constructor_13(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + mexAtExit(&_deleteAllObjects); + typedef std::shared_ptr> Shared; + + Optimizer::Verbosity verbosity = unwrap_enum::Verbosity>(in[0]); + Shared *self = new Shared(new gtsam::Optimizer(verbosity)); + collector_gtsamOptimizerGaussNewtonParams.insert(self); + out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL); + *reinterpret_cast (mxGetData(out[0])) = self; +} + +void gtsamOptimizerGaussNewtonParams_deconstructor_14(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef std::shared_ptr> Shared; checkArguments("delete_gtsamOptimizerGaussNewtonParams",nargout,nargin,1); @@ -198,12 +225,26 @@ void gtsamOptimizerGaussNewtonParams_deconstructor_11(int nargout, mxArray *out[ delete self; } -void gtsamOptimizerGaussNewtonParams_setVerbosity_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void gtsamOptimizerGaussNewtonParams_getVerbosity_15(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("getVerbosity",nargout,nargin-1,0); + auto obj = unwrap_shared_ptr>(in[0], "ptr_gtsamOptimizerGaussNewtonParams"); + out[0] = wrap_enum(obj->getVerbosity(),"gtsam.OptimizerGaussNewtonParams.Verbosity"); +} + +void gtsamOptimizerGaussNewtonParams_getVerbosity_16(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("getVerbosity",nargout,nargin-1,0); + auto obj = unwrap_shared_ptr>(in[0], "ptr_gtsamOptimizerGaussNewtonParams"); + out[0] = wrap_enum(obj->getVerbosity(),"gtsam.VerbosityLM"); +} + +void gtsamOptimizerGaussNewtonParams_setVerbosity_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("setVerbosity",nargout,nargin-1,1); auto obj = unwrap_shared_ptr>(in[0], "ptr_gtsamOptimizerGaussNewtonParams"); - std::shared_ptr::Verbosity> value = unwrap_enum::Verbosity>(in[1]); - obj->setVerbosity(*value); + Optimizer::Verbosity value = unwrap_enum::Verbosity>(in[1]); + obj->setVerbosity(value); } @@ -228,34 +269,49 @@ void mexFunction(int nargout, mxArray *out[], int 
nargin, const mxArray *in[]) Pet_deconstructor_2(nargout, out, nargin-1, in+1); break; case 3: - Pet_get_name_3(nargout, out, nargin-1, in+1); + Pet_getColor_3(nargout, out, nargin-1, in+1); break; case 4: - Pet_set_name_4(nargout, out, nargin-1, in+1); + Pet_setColor_4(nargout, out, nargin-1, in+1); break; case 5: - Pet_get_type_5(nargout, out, nargin-1, in+1); + Pet_get_name_5(nargout, out, nargin-1, in+1); break; case 6: - Pet_set_type_6(nargout, out, nargin-1, in+1); + Pet_set_name_6(nargout, out, nargin-1, in+1); break; case 7: - gtsamMCU_collectorInsertAndMakeBase_7(nargout, out, nargin-1, in+1); + Pet_get_type_7(nargout, out, nargin-1, in+1); break; case 8: - gtsamMCU_constructor_8(nargout, out, nargin-1, in+1); + Pet_set_type_8(nargout, out, nargin-1, in+1); break; case 9: - gtsamMCU_deconstructor_9(nargout, out, nargin-1, in+1); + gtsamMCU_collectorInsertAndMakeBase_9(nargout, out, nargin-1, in+1); break; case 10: - gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_10(nargout, out, nargin-1, in+1); + gtsamMCU_constructor_10(nargout, out, nargin-1, in+1); break; case 11: - gtsamOptimizerGaussNewtonParams_deconstructor_11(nargout, out, nargin-1, in+1); + gtsamMCU_deconstructor_11(nargout, out, nargin-1, in+1); break; case 12: - gtsamOptimizerGaussNewtonParams_setVerbosity_12(nargout, out, nargin-1, in+1); + gtsamOptimizerGaussNewtonParams_collectorInsertAndMakeBase_12(nargout, out, nargin-1, in+1); + break; + case 13: + gtsamOptimizerGaussNewtonParams_constructor_13(nargout, out, nargin-1, in+1); + break; + case 14: + gtsamOptimizerGaussNewtonParams_deconstructor_14(nargout, out, nargin-1, in+1); + break; + case 15: + gtsamOptimizerGaussNewtonParams_getVerbosity_15(nargout, out, nargin-1, in+1); + break; + case 16: + gtsamOptimizerGaussNewtonParams_getVerbosity_16(nargout, out, nargin-1, in+1); + break; + case 17: + gtsamOptimizerGaussNewtonParams_setVerbosity_17(nargout, out, nargin-1, in+1); break; } } catch(const std::exception& e) { diff --git a/tests/expected/matlab/special_cases_wrapper.cpp b/tests/expected/matlab/special_cases_wrapper.cpp index 565368c2c..2fe55ec01 100644 --- a/tests/expected/matlab/special_cases_wrapper.cpp +++ b/tests/expected/matlab/special_cases_wrapper.cpp @@ -211,8 +211,8 @@ void gtsamGeneralSFMFactorCal3Bundler_set_verbosity_12(int nargout, mxArray *out { checkArguments("verbosity",nargout,nargin-1,1); auto obj = unwrap_shared_ptr, gtsam::Point3>>(in[0], "ptr_gtsamGeneralSFMFactorCal3Bundler"); - std::shared_ptr, gtsam::Point3>::Verbosity> verbosity = unwrap_enum, gtsam::Point3>::Verbosity>(in[1]); - obj->verbosity = *verbosity; + gtsam::GeneralSFMFactor, gtsam::Point3>::Verbosity verbosity = unwrap_enum, gtsam::Point3>::Verbosity>(in[1]); + obj->verbosity = verbosity; } diff --git a/tests/expected/python/enum_pybind.cpp b/tests/expected/python/enum_pybind.cpp index 2fa804ac9..c67bf1de0 100644 --- a/tests/expected/python/enum_pybind.cpp +++ b/tests/expected/python/enum_pybind.cpp @@ -23,7 +23,9 @@ PYBIND11_MODULE(enum_py, m_) { py::class_> pet(m_, "Pet"); pet - .def(py::init(), py::arg("name"), py::arg("type")) + .def(py::init(), py::arg("name"), py::arg("type")) + .def("setColor",[](Pet* self, const Color& color){ self->setColor(color);}, py::arg("color")) + .def("getColor",[](Pet* self){return self->getColor();}) .def_readwrite("name", &Pet::name) .def_readwrite("type", &Pet::type); @@ -65,7 +67,10 @@ PYBIND11_MODULE(enum_py, m_) { py::class_, std::shared_ptr>> optimizergaussnewtonparams(m_gtsam, "OptimizerGaussNewtonParams"); 
optimizergaussnewtonparams - .def("setVerbosity",[](gtsam::Optimizer* self, const Optimizer::Verbosity value){ self->setVerbosity(value);}, py::arg("value")); + .def(py::init::Verbosity&>(), py::arg("verbosity")) + .def("setVerbosity",[](gtsam::Optimizer* self, const Optimizer::Verbosity value){ self->setVerbosity(value);}, py::arg("value")) + .def("getVerbosity",[](gtsam::Optimizer* self){return self->getVerbosity();}) + .def("getVerbosity",[](gtsam::Optimizer* self){return self->getVerbosity();}); py::enum_::Verbosity>(optimizergaussnewtonparams, "Verbosity", py::arithmetic()) .value("SILENT", gtsam::Optimizer::Verbosity::SILENT) diff --git a/tests/fixtures/enum.i b/tests/fixtures/enum.i index 71918c25a..6e70d9c57 100644 --- a/tests/fixtures/enum.i +++ b/tests/fixtures/enum.i @@ -3,13 +3,16 @@ enum Color { Red, Green, Blue }; class Pet { enum Kind { Dog, Cat }; - Pet(const string &name, Kind type); + Pet(const string &name, Pet::Kind type); + void setColor(const Color& color); + Color getColor() const; string name; - Kind type; + Pet::Kind type; }; namespace gtsam { +// Test global enums enum VerbosityLM { SILENT, SUMMARY, @@ -21,6 +24,7 @@ enum VerbosityLM { TRYDELTA }; +// Test multiple enums in a classs class MCU { MCU(); @@ -50,7 +54,12 @@ class Optimizer { VERBOSE }; + Optimizer(const This::Verbosity& verbosity); + void setVerbosity(const This::Verbosity value); + + gtsam::Optimizer::Verbosity getVerbosity() const; + gtsam::VerbosityLM getVerbosity() const; }; typedef gtsam::Optimizer OptimizerGaussNewtonParams; diff --git a/tests/test_interface_parser.py b/tests/test_interface_parser.py index 19462a51a..45415995f 100644 --- a/tests/test_interface_parser.py +++ b/tests/test_interface_parser.py @@ -38,7 +38,7 @@ class TestInterfaceParser(unittest.TestCase): def test_basic_type(self): """Tests for BasicType.""" - # Check basis type + # Check basic type t = Type.rule.parseString("int x")[0] self.assertEqual("int", t.typename.name) self.assertTrue(t.is_basic) @@ -243,7 +243,7 @@ class TestInterfaceParser(unittest.TestCase): self.assertEqual("void", return_type.type1.typename.name) self.assertTrue(return_type.type1.is_basic) - # Test basis type + # Test basic type return_type = ReturnType.rule.parseString("size_t")[0] self.assertEqual("size_t", return_type.type1.typename.name) self.assertTrue(not return_type.type2) From 20ba6b41dd7f956975e1a312041fa399684d81e4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 26 May 2023 15:05:30 -0400 Subject: [PATCH 27/78] fix geometry wrapper --- gtsam/geometry/geometry.i | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 630f6d252..0710959bc 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -575,13 +575,13 @@ class Unit3 { gtsam::Point3 point3() const; gtsam::Point3 point3(Eigen::Ref H) const; - Vector3 unitVector() const; - Vector3 unitVector(Eigen::Ref H) const; + gtsam::Vector3 unitVector() const; + gtsam::Vector3 unitVector(Eigen::Ref H) const; double dot(const gtsam::Unit3& q) const; double dot(const gtsam::Unit3& q, Eigen::Ref H1, Eigen::Ref H2) const; - Vector2 errorVector(const gtsam::Unit3& q) const; - Vector2 errorVector(const gtsam::Unit3& q, Eigen::Ref H_p, + gtsam::Vector2 errorVector(const gtsam::Unit3& q) const; + gtsam::Vector2 errorVector(const gtsam::Unit3& q, Eigen::Ref H_p, Eigen::Ref H_q) const; // Manifold From 9fb651d8708e396a5b82aedf948be39a39f5b7f4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 26 May 2023 
15:37:18 -0400 Subject: [PATCH 28/78] additional matlab test --- matlab/gtsam_tests/testCal3Unified.m | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/matlab/gtsam_tests/testCal3Unified.m b/matlab/gtsam_tests/testCal3Unified.m index 498c65343..ec5bff871 100644 --- a/matlab/gtsam_tests/testCal3Unified.m +++ b/matlab/gtsam_tests/testCal3Unified.m @@ -5,3 +5,8 @@ K = Cal3Unified; EXPECT('fx',K.fx()==1); EXPECT('fy',K.fy()==1); +params = PreintegrationParams.MakeSharedU(-9.81); +%params.getOmegaCoriolis() + +expectedBodyPSensor = gtsam.Pose3(gtsam.Rot3(0, 0, 0, 0, 0, 0, 0, 0, 0), gtsam.Point3(0, 0, 0)); +EXPECT('getBodyPSensor', expectedBodyPSensor.equals(params.getBodyPSensor(), 1e-9)); From c55772801f691584bb45d86f4fdc0386a8aaa1bd Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Sun, 28 May 2023 13:08:15 +0900 Subject: [PATCH 29/78] Fixed build issue, added more detailed explanation of the TableFactor. --- gtsam/discrete/TableFactor.cpp | 11 ----------- gtsam/discrete/TableFactor.h | 8 ++++---- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/gtsam/discrete/TableFactor.cpp b/gtsam/discrete/TableFactor.cpp index c852afdc2..e79f32bbc 100644 --- a/gtsam/discrete/TableFactor.cpp +++ b/gtsam/discrete/TableFactor.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -58,16 +57,6 @@ namespace gtsam { sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); } - /* ************************************************************************ */ - TableFactor::TableFactor(const SparseDiscreteConditional& c) - : DiscreteFactor(c.keys()), - sparse_table_(c.sparse_table_), - denominators_(c.denominators_) { - cardinalities_ = c.cardinalities_; - sorted_dkeys_ = discreteKeys(); - sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); - } - /* ************************************************************************ */ Eigen::SparseVector TableFactor::Convert( const std::vector& table) { diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index 1a328eabf..59d601537 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -32,12 +32,14 @@ namespace gtsam { - class SparseDiscreteConditional; class HybridValues; /** * A discrete probabilistic factor optimized for sparsity. - * + * Uses sparse_table_ to store only the non-zero probabilities. + * Computes the assigned value for the key using the ordering which the + * non-zero probabilties are stored in. + * * @ingroup discrete */ class GTSAM_EXPORT TableFactor : public DiscreteFactor { @@ -129,8 +131,6 @@ namespace gtsam { TableFactor(const DiscreteKey& key, const std::vector& row) : TableFactor(DiscreteKeys{key}, row) {} - /** Construct from a DiscreteTableConditional type */ - explicit TableFactor(const SparseDiscreteConditional& c); /// @} /// @name Testable From 361f9fa391b33b9894553e6f1671715c8dfb0ba7 Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Mon, 29 May 2023 00:28:03 +0900 Subject: [PATCH 30/78] added one line comments for variables. --- gtsam/discrete/TableFactor.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index 59d601537..c565cbe6b 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -36,23 +36,23 @@ namespace gtsam { /** * A discrete probabilistic factor optimized for sparsity. - * Uses sparse_table_ to store only the non-zero probabilities. + * Uses sparse_table_ to store only the nonzero probabilities. 
* Computes the assigned value for the key using the ordering which the - * non-zero probabilties are stored in. + * nonzero probabilties are stored in. (lazy cartesian product) * * @ingroup discrete */ class GTSAM_EXPORT TableFactor : public DiscreteFactor { protected: - std::map cardinalities_; - Eigen::SparseVector sparse_table_; + std::map cardinalities_; /// Map of Keys and their cardinalities. + Eigen::SparseVector sparse_table_; /// SparseVector of nonzero probabilities. private: - std::map denominators_; - DiscreteKeys sorted_dkeys_; + std::map denominators_; /// Map of Keys and their denominators used in keyValueForIndex. + DiscreteKeys sorted_dkeys_; /// Sorted DiscreteKeys to use internally. /** - * @brief Finds nth entry in the cartesian product of arrays in O(1) + * @brief Uses lazy cartesian product to find nth entry in the cartesian product of arrays in O(1) * Example) * v0 | v1 | val * 0 | 0 | 10 From 7b3ce2fe3400a74ae4bd0a8eca518f27d815857f Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Mon, 29 May 2023 01:17:50 +0900 Subject: [PATCH 31/78] added doc for disceteKey in .h file, formatted in Google style. --- gtsam/discrete/TableFactor.cpp | 893 ++++++++++++++++----------------- gtsam/discrete/TableFactor.h | 503 ++++++++++--------- 2 files changed, 702 insertions(+), 694 deletions(-) diff --git a/gtsam/discrete/TableFactor.cpp b/gtsam/discrete/TableFactor.cpp index e79f32bbc..acb59a8be 100644 --- a/gtsam/discrete/TableFactor.cpp +++ b/gtsam/discrete/TableFactor.cpp @@ -16,10 +16,10 @@ * @author Yoonwoo Kim */ -#include #include -#include +#include #include +#include #include #include @@ -28,528 +28,527 @@ using namespace std; namespace gtsam { - /* ************************************************************************ */ - TableFactor::TableFactor() {} +/* ************************************************************************ */ +TableFactor::TableFactor() {} - /* ************************************************************************ */ - TableFactor::TableFactor(const DiscreteKeys& dkeys, - const TableFactor& potentials) - : DiscreteFactor(dkeys.indices()), - cardinalities_(potentials .cardinalities_) { +/* ************************************************************************ */ +TableFactor::TableFactor(const DiscreteKeys& dkeys, + const TableFactor& potentials) + : DiscreteFactor(dkeys.indices()), + cardinalities_(potentials.cardinalities_) { sparse_table_ = potentials.sparse_table_; denominators_ = potentials.denominators_; sorted_dkeys_ = discreteKeys(); sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); - } +} - /* ************************************************************************ */ - TableFactor::TableFactor(const DiscreteKeys& dkeys, - const Eigen::SparseVector& table) - : DiscreteFactor(dkeys.indices()), sparse_table_(table.size()) { - sparse_table_ = table; - double denom = table.size(); - for (const DiscreteKey& dkey : dkeys) { - cardinalities_.insert(dkey); - denom /= dkey.second; - denominators_.insert(std::pair(dkey.first, denom)); - } - sorted_dkeys_ = discreteKeys(); - sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); +/* ************************************************************************ */ +TableFactor::TableFactor(const DiscreteKeys& dkeys, + const Eigen::SparseVector& table) + : DiscreteFactor(dkeys.indices()), sparse_table_(table.size()) { + sparse_table_ = table; + double denom = table.size(); + for (const DiscreteKey& dkey : dkeys) { + cardinalities_.insert(dkey); + denom /= dkey.second; + 
denominators_.insert(std::pair(dkey.first, denom)); } + sorted_dkeys_ = discreteKeys(); + sort(sorted_dkeys_.begin(), sorted_dkeys_.end()); +} - /* ************************************************************************ */ - Eigen::SparseVector TableFactor::Convert( +/* ************************************************************************ */ +Eigen::SparseVector TableFactor::Convert( const std::vector& table) { - Eigen::SparseVector sparse_table(table.size()); - // Count number of nonzero elements in table and reserving the space. - const uint64_t nnz = std::count_if(table.begin(), table.end(), - [](uint64_t i) { return i != 0; }); - sparse_table.reserve(nnz); - for (uint64_t i = 0; i < table.size(); i++) { - if (table[i] != 0) sparse_table.insert(i) = table[i]; + Eigen::SparseVector sparse_table(table.size()); + // Count number of nonzero elements in table and reserving the space. + const uint64_t nnz = std::count_if(table.begin(), table.end(), + [](uint64_t i) { return i != 0; }); + sparse_table.reserve(nnz); + for (uint64_t i = 0; i < table.size(); i++) { + if (table[i] != 0) sparse_table.insert(i) = table[i]; + } + sparse_table.pruned(); + sparse_table.data().squeeze(); + return sparse_table; +} + +/* ************************************************************************ */ +Eigen::SparseVector TableFactor::Convert(const std::string& table) { + // Convert string to doubles. + std::vector ys; + std::istringstream iss(table); + std::copy(std::istream_iterator(iss), std::istream_iterator(), + std::back_inserter(ys)); + return Convert(ys); +} + +/* ************************************************************************ */ +bool TableFactor::equals(const DiscreteFactor& other, double tol) const { + if (!dynamic_cast(&other)) { + return false; + } else { + const auto& f(static_cast(other)); + return sparse_table_.isApprox(f.sparse_table_, tol); + } +} + +/* ************************************************************************ */ +double TableFactor::operator()(const DiscreteValues& values) const { + // a b c d => D * (C * (B * (a) + b) + c) + d + uint64_t idx = 0, card = 1; + for (auto it = sorted_dkeys_.rbegin(); it != sorted_dkeys_.rend(); ++it) { + if (values.find(it->first) != values.end()) { + idx += card * values.at(it->first); } - sparse_table.pruned(); - sparse_table.data().squeeze(); - return sparse_table; + card *= it->second; } + return sparse_table_.coeff(idx); +} - /* ************************************************************************ */ - Eigen::SparseVector TableFactor::Convert(const std::string& table) { - // Convert string to doubles. 
- std::vector ys; - std::istringstream iss(table); - std::copy(std::istream_iterator(iss), std::istream_iterator(), - std::back_inserter(ys)); - return Convert(ys); - } - - /* ************************************************************************ */ - bool TableFactor::equals(const DiscreteFactor& other, - double tol) const { - if (!dynamic_cast(&other)) { - return false; - } else { - const auto& f(static_cast(other)); - return sparse_table_.isApprox(f.sparse_table_, tol); +/* ************************************************************************ */ +double TableFactor::findValue(const DiscreteValues& values) const { + // a b c d => D * (C * (B * (a) + b) + c) + d + uint64_t idx = 0, card = 1; + for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { + if (values.find(*it) != values.end()) { + idx += card * values.at(*it); } + card *= cardinality(*it); } + return sparse_table_.coeff(idx); +} - /* ************************************************************************ */ - double TableFactor::operator()(const DiscreteValues& values) const { - // a b c d => D * (C * (B * (a) + b) + c) + d - uint64_t idx = 0, card = 1; - for (auto it = sorted_dkeys_.rbegin(); it != sorted_dkeys_.rend(); ++it) { - if (values.find(it->first) != values.end()) { - idx += card * values.at(it->first); - } - card *= it->second; - } - return sparse_table_.coeff(idx); +/* ************************************************************************ */ +double TableFactor::error(const DiscreteValues& values) const { + return -log(evaluate(values)); +} +/* ************************************************************************ */ +double TableFactor::error(const HybridValues& values) const { + return error(values.discrete()); +} + +/* ************************************************************************ */ +DecisionTreeFactor TableFactor::operator*(const DecisionTreeFactor& f) const { + return toDecisionTreeFactor() * f; +} + +/* ************************************************************************ */ +DecisionTreeFactor TableFactor::toDecisionTreeFactor() const { + DiscreteKeys dkeys = discreteKeys(); + std::vector table; + for (auto i = 0; i < sparse_table_.size(); i++) { + table.push_back(sparse_table_.coeff(i)); } + DecisionTreeFactor f(dkeys, table); + return f; +} - /* ************************************************************************ */ - double TableFactor::findValue(const DiscreteValues& values) const { - // a b c d => D * (C * (B * (a) + b) + c) + d - uint64_t idx = 0, card = 1; - for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { - if (values.find(*it) != values.end()) { - idx += card * values.at(*it); - } +/* ************************************************************************ */ +TableFactor TableFactor::choose(const DiscreteValues parent_assign, + DiscreteKeys parent_keys) const { + if (parent_keys.empty()) return *this; + + // Unique representation of parent values. 
+ uint64_t unique = 0; + uint64_t card = 1; + for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { + if (parent_assign.find(*it) != parent_assign.end()) { + unique += parent_assign.at(*it) * card; card *= cardinality(*it); } - return sparse_table_.coeff(idx); } - /* ************************************************************************ */ - double TableFactor::error(const DiscreteValues& values) const { - return -log(evaluate(values)); - } - - /* ************************************************************************ */ - double TableFactor::error(const HybridValues& values) const { - return error(values.discrete()); - } + // Find child DiscreteKeys + DiscreteKeys child_dkeys; + std::sort(parent_keys.begin(), parent_keys.end()); + std::set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), + parent_keys.begin(), parent_keys.end(), + std::back_inserter(child_dkeys)); - /* ************************************************************************ */ - DecisionTreeFactor TableFactor::operator*(const DecisionTreeFactor& f) const { - return toDecisionTreeFactor() * f; - } + // Create child sparse table to populate. + uint64_t child_card = 1; + for (const DiscreteKey& child_dkey : child_dkeys) + child_card *= child_dkey.second; + Eigen::SparseVector child_sparse_table_(child_card); + child_sparse_table_.reserve(child_card); - /* ************************************************************************ */ - DecisionTreeFactor TableFactor::toDecisionTreeFactor() const { - DiscreteKeys dkeys = discreteKeys(); - std::vector table; - for (auto i = 0; i < sparse_table_.size(); i++) { - table.push_back(sparse_table_.coeff(i)); + // Populate child sparse table. + for (SparseIt it(sparse_table_); it; ++it) { + // Create unique representation of parent keys + uint64_t parent_unique = uniqueRep(parent_keys, it.index()); + // Populate the table + if (parent_unique == unique) { + uint64_t idx = uniqueRep(child_dkeys, it.index()); + child_sparse_table_.insert(idx) = it.value(); } - DecisionTreeFactor f(dkeys, table); + } + + child_sparse_table_.pruned(); + child_sparse_table_.data().squeeze(); + return TableFactor(child_dkeys, child_sparse_table_); +} + +/* ************************************************************************ */ +double TableFactor::safe_div(const double& a, const double& b) { + // The use for safe_div is when we divide the product factor by the sum + // factor. If the product or sum is zero, we accord zero probability to the + // event. + return (a == 0 || b == 0) ? 
0 : (a / b); +} + +/* ************************************************************************ */ +void TableFactor::print(const string& s, const KeyFormatter& formatter) const { + cout << s; + cout << " f["; + for (auto&& key : keys()) + cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key); + cout << " ]" << endl; + for (SparseIt it(sparse_table_); it; ++it) { + DiscreteValues assignment = findAssignments(it.index()); + for (auto&& kv : assignment) { + cout << "(" << formatter(kv.first) << ", " << kv.second << ")"; + } + cout << " | " << it.value() << " | " << it.index() << endl; + } + cout << "number of nnzs: " << sparse_table_.nonZeros() << endl; +} + +/* ************************************************************************ */ +TableFactor TableFactor::apply(const TableFactor& f, Binary op) const { + if (keys_.empty() && sparse_table_.nonZeros() == 0) return f; - } - - /* ************************************************************************ */ - TableFactor TableFactor::choose(const DiscreteValues parent_assign, - DiscreteKeys parent_keys) const { - if (parent_keys.empty()) return *this; - - // Unique representation of parent values. - uint64_t unique = 0; - uint64_t card = 1; - for (auto it = keys_.rbegin(); it != keys_.rend(); ++it) { - if (parent_assign.find(*it) != parent_assign.end()) { - unique += parent_assign.at(*it) * card; - card *= cardinality(*it); - } - } - - // Find child DiscreteKeys - DiscreteKeys child_dkeys; - std::sort(parent_keys.begin(), parent_keys.end()); - std::set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), parent_keys.begin(), - parent_keys.end(), std::back_inserter(child_dkeys)); - - // Create child sparse table to populate. - uint64_t child_card = 1; - for (const DiscreteKey& child_dkey : child_dkeys) - child_card *= child_dkey.second; - Eigen::SparseVector child_sparse_table_(child_card); - child_sparse_table_.reserve(child_card); - - // Populate child sparse table. - for (SparseIt it(sparse_table_); it; ++it) { - // Create unique representation of parent keys - uint64_t parent_unique = uniqueRep(parent_keys, it.index()); - // Populate the table - if (parent_unique == unique) { - uint64_t idx = uniqueRep(child_dkeys, it.index()); - child_sparse_table_.insert(idx) = it.value(); - } - } - - child_sparse_table_.pruned(); - child_sparse_table_.data().squeeze(); - return TableFactor(child_dkeys, child_sparse_table_); - } - - /* ************************************************************************ */ - double TableFactor::safe_div(const double& a, const double& b) { - // The use for safe_div is when we divide the product factor by the sum - // factor. If the product or sum is zero, we accord zero probability to the - // event. - return (a == 0 || b == 0) ? 0 : (a / b); - } - - /* ************************************************************************ */ - void TableFactor::print(const string& s, const KeyFormatter& formatter) const { - cout << s; - cout << " f["; - for (auto&& key : keys()) - cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key); - cout << " ]" << endl; - for (SparseIt it(sparse_table_); it; ++it) { - DiscreteValues assignment = findAssignments(it.index()); - for (auto&& kv : assignment) { - cout << "(" << formatter(kv.first) << ", " << kv.second << ")"; - } - cout << " | " << it.value() << " | " << it.index() << endl; - } - cout << "number of nnzs: " < map_f = + else if (f.keys_.empty() && f.sparse_table_.nonZeros() == 0) + return *this; + // 1. Identify keys for contract and free modes. 
+ DiscreteKeys contract_dkeys = contractDkeys(f); + DiscreteKeys f_free_dkeys = f.freeDkeys(*this); + DiscreteKeys union_dkeys = unionDkeys(f); + // 2. Create hash table for input factor f + unordered_map map_f = f.createMap(contract_dkeys, f_free_dkeys); - // 3. Initialize multiplied factor. - uint64_t card = 1; - for (auto u_dkey : union_dkeys) card *= u_dkey.second; - Eigen::SparseVector mult_sparse_table(card); - mult_sparse_table.reserve(card); - // 3. Multiply. - for (SparseIt it(sparse_table_); it; ++it) { - uint64_t contract_unique = uniqueRep(contract_dkeys, it.index()); - if (map_f.find(contract_unique) == map_f.end()) continue; - for (auto assignVal : map_f[contract_unique]) { - uint64_t union_idx = unionRep(union_dkeys, assignVal.first, it.index()); - mult_sparse_table.insert(union_idx) = op(it.value(), assignVal.second); - } + // 3. Initialize multiplied factor. + uint64_t card = 1; + for (auto u_dkey : union_dkeys) card *= u_dkey.second; + Eigen::SparseVector mult_sparse_table(card); + mult_sparse_table.reserve(card); + // 3. Multiply. + for (SparseIt it(sparse_table_); it; ++it) { + uint64_t contract_unique = uniqueRep(contract_dkeys, it.index()); + if (map_f.find(contract_unique) == map_f.end()) continue; + for (auto assignVal : map_f[contract_unique]) { + uint64_t union_idx = unionRep(union_dkeys, assignVal.first, it.index()); + mult_sparse_table.insert(union_idx) = op(it.value(), assignVal.second); } - // 4. Free unused memory. - mult_sparse_table.pruned(); - mult_sparse_table.data().squeeze(); - // 5. Create union keys and return. - return TableFactor(union_dkeys, mult_sparse_table); } + // 4. Free unused memory. + mult_sparse_table.pruned(); + mult_sparse_table.data().squeeze(); + // 5. Create union keys and return. + return TableFactor(union_dkeys, mult_sparse_table); +} - /* ************************************************************************ */ - DiscreteKeys TableFactor::contractDkeys(const TableFactor& f) const { - // Find contract modes. - DiscreteKeys contract; - set_intersection(sorted_dkeys_.begin(), sorted_dkeys_.end(), - f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), - back_inserter(contract)); - return contract; - } +/* ************************************************************************ */ +DiscreteKeys TableFactor::contractDkeys(const TableFactor& f) const { + // Find contract modes. + DiscreteKeys contract; + set_intersection(sorted_dkeys_.begin(), sorted_dkeys_.end(), + f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), + back_inserter(contract)); + return contract; +} - /* ************************************************************************ */ - DiscreteKeys TableFactor::freeDkeys(const TableFactor& f) const { - // Find free modes. - DiscreteKeys free; - set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), - f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), - back_inserter(free)); - return free; - } +/* ************************************************************************ */ +DiscreteKeys TableFactor::freeDkeys(const TableFactor& f) const { + // Find free modes. + DiscreteKeys free; + set_difference(sorted_dkeys_.begin(), sorted_dkeys_.end(), + f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), + back_inserter(free)); + return free; +} - /* ************************************************************************ */ - DiscreteKeys TableFactor::unionDkeys(const TableFactor& f) const { - // Find union modes. 
- DiscreteKeys union_dkeys; - set_union(sorted_dkeys_.begin(), sorted_dkeys_.end(), - f.sorted_dkeys_.begin(), f.sorted_dkeys_.end(), - back_inserter(union_dkeys)); - return union_dkeys; - } +/* ************************************************************************ */ +DiscreteKeys TableFactor::unionDkeys(const TableFactor& f) const { + // Find union modes. + DiscreteKeys union_dkeys; + set_union(sorted_dkeys_.begin(), sorted_dkeys_.end(), f.sorted_dkeys_.begin(), + f.sorted_dkeys_.end(), back_inserter(union_dkeys)); + return union_dkeys; +} - /* ************************************************************************ */ - uint64_t TableFactor::unionRep(const DiscreteKeys& union_keys, - const DiscreteValues& f_free, const uint64_t idx) const { - uint64_t union_idx = 0, card = 1; - for (auto it = union_keys.rbegin(); it != union_keys.rend(); it++) { - if (f_free.find(it->first) == f_free.end()) { - union_idx += keyValueForIndex(it->first, idx) * card; - } else { - union_idx += f_free.at(it->first) * card; - } - card *= it->second; +/* ************************************************************************ */ +uint64_t TableFactor::unionRep(const DiscreteKeys& union_keys, + const DiscreteValues& f_free, + const uint64_t idx) const { + uint64_t union_idx = 0, card = 1; + for (auto it = union_keys.rbegin(); it != union_keys.rend(); it++) { + if (f_free.find(it->first) == f_free.end()) { + union_idx += keyValueForIndex(it->first, idx) * card; + } else { + union_idx += f_free.at(it->first) * card; } - return union_idx; + card *= it->second; } + return union_idx; +} - /* ************************************************************************ */ - unordered_map TableFactor::createMap( +/* ************************************************************************ */ +unordered_map TableFactor::createMap( const DiscreteKeys& contract, const DiscreteKeys& free) const { - // 1. Initialize map. - unordered_map map_f; - // 2. Iterate over nonzero elements. - for (SparseIt it(sparse_table_); it; ++it) { - // 3. Create unique representation of contract modes. - uint64_t unique_rep = uniqueRep(contract, it.index()); - // 4. Create assignment for free modes. - DiscreteValues free_assignments; - for (auto& key : free) free_assignments[key.first] - = keyValueForIndex(key.first, it.index()); - // 5. Populate map. - if (map_f.find(unique_rep) == map_f.end()) { - map_f[unique_rep] = {make_pair(free_assignments, it.value())}; - } else { - map_f[unique_rep].push_back(make_pair(free_assignments, it.value())); - } + // 1. Initialize map. + unordered_map map_f; + // 2. Iterate over nonzero elements. + for (SparseIt it(sparse_table_); it; ++it) { + // 3. Create unique representation of contract modes. + uint64_t unique_rep = uniqueRep(contract, it.index()); + // 4. Create assignment for free modes. + DiscreteValues free_assignments; + for (auto& key : free) + free_assignments[key.first] = keyValueForIndex(key.first, it.index()); + // 5. Populate map. 
+ if (map_f.find(unique_rep) == map_f.end()) { + map_f[unique_rep] = {make_pair(free_assignments, it.value())}; + } else { + map_f[unique_rep].push_back(make_pair(free_assignments, it.value())); } - return map_f; } + return map_f; +} - /* ************************************************************************ */ - uint64_t TableFactor::uniqueRep(const DiscreteKeys& dkeys, const uint64_t idx) const { - if (dkeys.empty()) return 0; - uint64_t unique_rep = 0, card = 1; - for (auto it = dkeys.rbegin(); it != dkeys.rend(); it++) { - unique_rep += keyValueForIndex(it->first, idx) * card; - card *= it->second; - } - return unique_rep; +/* ************************************************************************ */ +uint64_t TableFactor::uniqueRep(const DiscreteKeys& dkeys, + const uint64_t idx) const { + if (dkeys.empty()) return 0; + uint64_t unique_rep = 0, card = 1; + for (auto it = dkeys.rbegin(); it != dkeys.rend(); it++) { + unique_rep += keyValueForIndex(it->first, idx) * card; + card *= it->second; } + return unique_rep; +} - /* ************************************************************************ */ - uint64_t TableFactor::uniqueRep(const DiscreteValues& assignments) const { - if (assignments.empty()) return 0; - uint64_t unique_rep = 0, card = 1; - for (auto it = assignments.rbegin(); it != assignments.rend(); it++) { - unique_rep += it->second * card; - card *= cardinalities_.at(it->first); - } - return unique_rep; +/* ************************************************************************ */ +uint64_t TableFactor::uniqueRep(const DiscreteValues& assignments) const { + if (assignments.empty()) return 0; + uint64_t unique_rep = 0, card = 1; + for (auto it = assignments.rbegin(); it != assignments.rend(); it++) { + unique_rep += it->second * card; + card *= cardinalities_.at(it->first); } + return unique_rep; +} - /* ************************************************************************ */ - DiscreteValues TableFactor::findAssignments(const uint64_t idx) const { - DiscreteValues assignment; - for (Key key : keys_) { - assignment[key] = keyValueForIndex(key, idx); - } - return assignment; +/* ************************************************************************ */ +DiscreteValues TableFactor::findAssignments(const uint64_t idx) const { + DiscreteValues assignment; + for (Key key : keys_) { + assignment[key] = keyValueForIndex(key, idx); } + return assignment; +} - /* ************************************************************************ */ - TableFactor::shared_ptr TableFactor::combine( - size_t nrFrontals, Binary op) const { - if (nrFrontals > size()) { - throw invalid_argument( - "TableFactor::combine: invalid number of frontal " - "keys " + - to_string(nrFrontals) + ", nr.keys=" + std::to_string(size())); - } - // Find remaining keys. - DiscreteKeys remain_dkeys; - uint64_t card = 1; - for (auto i = nrFrontals; i < keys_.size(); i++) { - remain_dkeys.push_back(discreteKey(i)); - card *= cardinality(keys_[i]); - } - // Create combined table. - Eigen::SparseVector combined_table(card); - combined_table.reserve(sparse_table_.nonZeros()); - // Populate combined table. 
- for (SparseIt it(sparse_table_); it; ++it) { - uint64_t idx = uniqueRep(remain_dkeys, it.index()); - double new_val = op(combined_table.coeff(idx), it.value()); - combined_table.coeffRef(idx) = new_val; +/* ************************************************************************ */ +TableFactor::shared_ptr TableFactor::combine(size_t nrFrontals, + Binary op) const { + if (nrFrontals > size()) { + throw invalid_argument( + "TableFactor::combine: invalid number of frontal " + "keys " + + to_string(nrFrontals) + ", nr.keys=" + std::to_string(size())); + } + // Find remaining keys. + DiscreteKeys remain_dkeys; + uint64_t card = 1; + for (auto i = nrFrontals; i < keys_.size(); i++) { + remain_dkeys.push_back(discreteKey(i)); + card *= cardinality(keys_[i]); + } + // Create combined table. + Eigen::SparseVector combined_table(card); + combined_table.reserve(sparse_table_.nonZeros()); + // Populate combined table. + for (SparseIt it(sparse_table_); it; ++it) { + uint64_t idx = uniqueRep(remain_dkeys, it.index()); + double new_val = op(combined_table.coeff(idx), it.value()); + combined_table.coeffRef(idx) = new_val; } // Free unused memory. combined_table.pruned(); combined_table.data().squeeze(); return std::make_shared(remain_dkeys, combined_table); - } +} - /* ************************************************************************ */ - TableFactor::shared_ptr TableFactor::combine( - const Ordering& frontalKeys, Binary op) const { - if (frontalKeys.size() > size()) { - throw invalid_argument( - "TableFactor::combine: invalid number of frontal " - "keys " + - std::to_string(frontalKeys.size()) + ", nr.keys=" + - std::to_string(size())); - } - // Find remaining keys. - DiscreteKeys remain_dkeys; - uint64_t card = 1; - for (Key key : keys_) { - if (std::find(frontalKeys.begin(), frontalKeys.end(), key) == - frontalKeys.end()) { - remain_dkeys.emplace_back(key, cardinality(key)); - card *= cardinality(key); - } - } - // Create combined table. - Eigen::SparseVector combined_table(card); - combined_table.reserve(sparse_table_.nonZeros()); - // Populate combined table. - for (SparseIt it(sparse_table_); it; ++it) { - uint64_t idx = uniqueRep(remain_dkeys, it.index()); - double new_val = op(combined_table.coeff(idx), it.value()); - combined_table.coeffRef(idx) = new_val; - } - // Free unused memory. - combined_table.pruned(); - combined_table.data().squeeze(); - return std::make_shared(remain_dkeys, combined_table); +/* ************************************************************************ */ +TableFactor::shared_ptr TableFactor::combine(const Ordering& frontalKeys, + Binary op) const { + if (frontalKeys.size() > size()) { + throw invalid_argument( + "TableFactor::combine: invalid number of frontal " + "keys " + + std::to_string(frontalKeys.size()) + + ", nr.keys=" + std::to_string(size())); } - - /* ************************************************************************ */ - size_t TableFactor::keyValueForIndex(Key target_key, uint64_t index) const { - // http://phrogz.net/lazy-cartesian-product - return (index / denominators_.at(target_key)) % cardinality(target_key); + // Find remaining keys. + DiscreteKeys remain_dkeys; + uint64_t card = 1; + for (Key key : keys_) { + if (std::find(frontalKeys.begin(), frontalKeys.end(), key) == + frontalKeys.end()) { + remain_dkeys.emplace_back(key, cardinality(key)); + card *= cardinality(key); + } } + // Create combined table. + Eigen::SparseVector combined_table(card); + combined_table.reserve(sparse_table_.nonZeros()); + // Populate combined table. 
+ for (SparseIt it(sparse_table_); it; ++it) { + uint64_t idx = uniqueRep(remain_dkeys, it.index()); + double new_val = op(combined_table.coeff(idx), it.value()); + combined_table.coeffRef(idx) = new_val; + } + // Free unused memory. + combined_table.pruned(); + combined_table.data().squeeze(); + return std::make_shared(remain_dkeys, combined_table); +} - /* ************************************************************************ */ - std::vector> TableFactor::enumerate() - const { - // Get all possible assignments - std::vector> pairs = discreteKeys(); - // Reverse to make cartesian product output a more natural ordering. - std::vector> rpairs(pairs.rbegin(), pairs.rend()); - const auto assignments = DiscreteValues::CartesianProduct(rpairs); - // Construct unordered_map with values - std::vector> result; - for (const auto& assignment : assignments) { - result.emplace_back(assignment, operator()(assignment)); - } - return result; - } +/* ************************************************************************ */ +size_t TableFactor::keyValueForIndex(Key target_key, uint64_t index) const { + // http://phrogz.net/lazy-cartesian-product + return (index / denominators_.at(target_key)) % cardinality(target_key); +} - /* ************************************************************************ */ - DiscreteKeys TableFactor::discreteKeys() const { - DiscreteKeys result; - for (auto&& key : keys()) { - DiscreteKey dkey(key, cardinality(key)); - if (std::find(result.begin(), result.end(), dkey) == result.end()) { - result.push_back(dkey); - } - } - return result; +/* ************************************************************************ */ +std::vector> TableFactor::enumerate() const { + // Get all possible assignments + std::vector> pairs = discreteKeys(); + // Reverse to make cartesian product output a more natural ordering. + std::vector> rpairs(pairs.rbegin(), pairs.rend()); + const auto assignments = DiscreteValues::CartesianProduct(rpairs); + // Construct unordered_map with values + std::vector> result; + for (const auto& assignment : assignments) { + result.emplace_back(assignment, operator()(assignment)); } + return result; +} + +/* ************************************************************************ */ +DiscreteKeys TableFactor::discreteKeys() const { + DiscreteKeys result; + for (auto&& key : keys()) { + DiscreteKey dkey(key, cardinality(key)); + if (std::find(result.begin(), result.end(), dkey) == result.end()) { + result.push_back(dkey); + } + } + return result; +} + +// Print out header. +/* ************************************************************************ */ +string TableFactor::markdown(const KeyFormatter& keyFormatter, + const Names& names) const { + stringstream ss; // Print out header. - /* ************************************************************************ */ - string TableFactor::markdown(const KeyFormatter& keyFormatter, - const Names& names) const { - stringstream ss; + ss << "|"; + for (auto& key : keys()) { + ss << keyFormatter(key) << "|"; + } + ss << "value|\n"; - // Print out header. + // Print out separator with alignment hints. + ss << "|"; + for (size_t j = 0; j < size(); j++) ss << ":-:|"; + ss << ":-:|\n"; + + // Print out all rows. 
+ for (SparseIt it(sparse_table_); it; ++it) { + DiscreteValues assignment = findAssignments(it.index()); ss << "|"; for (auto& key : keys()) { - ss << keyFormatter(key) << "|"; + size_t index = assignment.at(key); + ss << DiscreteValues::Translate(names, key, index) << "|"; } - ss << "value|\n"; - - // Print out separator with alignment hints. - ss << "|"; - for (size_t j = 0; j < size(); j++) ss << ":-:|"; - ss << ":-:|\n"; - - // Print out all rows. - for (SparseIt it(sparse_table_); it; ++it) { - DiscreteValues assignment = findAssignments(it.index()); - ss << "|"; - for (auto& key : keys()) { - size_t index = assignment.at(key); - ss << DiscreteValues::Translate(names, key, index) << "|"; - } - ss << it.value() << "|\n"; - } - return ss.str(); + ss << it.value() << "|\n"; } + return ss.str(); +} - /* ************************************************************************ */ - string TableFactor::html(const KeyFormatter& keyFormatter, - const Names& names) const { - stringstream ss; +/* ************************************************************************ */ +string TableFactor::html(const KeyFormatter& keyFormatter, + const Names& names) const { + stringstream ss; - // Print out preamble. - ss << "
<div>\n<table class='DiscreteFactor'>\n  <thead>\n";
+  // Print out preamble.
+  ss << "<div>\n<table class='DiscreteFactor'>\n  <thead>\n";
 
-    // Print out header row.
-    ss << "    <tr>";
-    for (auto& key : keys()) {
-      ss << "<th>" << keyFormatter(key) << "</th>";
-    }
-    ss << "<th>value</th></tr>\n";
-
-    // Finish header and start body.
-    ss << "  </thead>\n  <tbody>\n";
-
-    // Print out all rows.
-    for (SparseIt it(sparse_table_); it; ++it) {
-      DiscreteValues assignment = findAssignments(it.index());
-      ss << "    <tr>";
-      for (auto& key : keys()) {
-        size_t index = assignment.at(key);
-        ss << "<th>" << DiscreteValues::Translate(names, key, index) << "</th>";
-      }
-      ss << "<td>" << it.value() << "</td>";  // value
-      ss << "</tr>\n";
-    }
-    ss << "  </tbody>\n</table>\n</div>";
-    return ss.str();
-  }
+  // Print out header row.
+  ss << "    <tr>";
+  for (auto& key : keys()) {
+    ss << "<th>" << keyFormatter(key) << "</th>";
+  }
+  ss << "<th>value</th></tr>\n";
+
+  // Finish header and start body.
+  ss << "  </thead>\n  <tbody>\n";
+
+  // Print out all rows.
+  for (SparseIt it(sparse_table_); it; ++it) {
+    DiscreteValues assignment = findAssignments(it.index());
+    ss << "    <tr>";
+    for (auto& key : keys()) {
+      size_t index = assignment.at(key);
+      ss << "<th>" << DiscreteValues::Translate(names, key, index) << "</th>";
+    }
+    ss << "<td>" << it.value() << "</td>";  // value
+    ss << "</tr>\n";
+  }
+  ss << "  </tbody>\n</table>\n</div>";
+  return ss.str();
+}
 
-  /* ************************************************************************ */
-  TableFactor TableFactor::prune(size_t maxNrAssignments) const {
-    const size_t N = maxNrAssignments;
-
-    // Get the probabilities in the TableFactor so we can threshold.
-    vector<pair<Eigen::Index, double>> probabilities;
-
-    // Store non-zero probabilities along with their indices in a vector.
-    for (SparseIt it(sparse_table_); it; ++it) {
-      probabilities.emplace_back(it.index(), it.value());
-    }
-
-    // The number of probabilities can be lower than max_leaves.
-    if (probabilities.size() <= N) return *this;
-
-    // Sort the vector in descending order based on the element values.
-    sort(probabilities.begin(), probabilities.end(), [] (
-      const std::pair<Eigen::Index, double>& a,
-      const std::pair<Eigen::Index, double>& b) {
-        return a.second > b.second;
-    });
-
-    // Keep the largest N probabilities in the vector.
-    if (probabilities.size() > N) probabilities.resize(N);
-
-    // Create pruned sparse vector.
-    Eigen::SparseVector<double> pruned_vec(sparse_table_.size());
-    pruned_vec.reserve(probabilities.size());
-
-    // Populate pruned sparse vector.
-    for (const auto& prob : probabilities) {
-      pruned_vec.insert(prob.first) = prob.second;
-    }
-
-    // Create pruned decision tree factor and return.
-    return TableFactor(this->discreteKeys(), pruned_vec);
-  }
+/* ************************************************************************ */
+TableFactor TableFactor::prune(size_t maxNrAssignments) const {
+  const size_t N = maxNrAssignments;
+
+  // Get the probabilities in the TableFactor so we can threshold.
+  vector<pair<Eigen::Index, double>> probabilities;
+
+  // Store non-zero probabilities along with their indices in a vector.
+  for (SparseIt it(sparse_table_); it; ++it) {
+    probabilities.emplace_back(it.index(), it.value());
+  }
+
+  // The number of probabilities can be lower than max_leaves.
+  if (probabilities.size() <= N) return *this;
+
+  // Sort the vector in descending order based on the element values.
+  sort(probabilities.begin(), probabilities.end(),
+       [](const std::pair<Eigen::Index, double>& a,
+          const std::pair<Eigen::Index, double>& b) {
+         return a.second > b.second;
+       });
+
+  // Keep the largest N probabilities in the vector.
+  if (probabilities.size() > N) probabilities.resize(N);
+
+  // Create pruned sparse vector.
+  Eigen::SparseVector<double> pruned_vec(sparse_table_.size());
+  pruned_vec.reserve(probabilities.size());
+
+  // Populate pruned sparse vector.
+  for (const auto& prob : probabilities) {
+    pruned_vec.insert(prob.first) = prob.second;
+  }
+
+  // Create pruned decision tree factor and return.
+ return TableFactor(this->discreteKeys(), pruned_vec); +} + +/* ************************************************************************ */ } // namespace gtsam diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index c565cbe6b..d73dc1c9d 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -23,8 +23,8 @@ #include #include -#include #include +#include #include #include #include @@ -32,287 +32,296 @@ namespace gtsam { - class HybridValues; +class HybridValues; + +/** + * A discrete probabilistic factor optimized for sparsity. + * Uses sparse_table_ to store only the nonzero probabilities. + * Computes the assigned value for the key using the ordering which the + * nonzero probabilties are stored in. (lazy cartesian product) + * + * @ingroup discrete + */ +class GTSAM_EXPORT TableFactor : public DiscreteFactor { + protected: + /// Map of Keys and their cardinalities. + std::map cardinalities_; + /// SparseVector of nonzero probabilities. + Eigen::SparseVector sparse_table_; + + private: + /// Map of Keys and their denominators used in keyValueForIndex. + std::map denominators_; + /// Sorted DiscreteKeys to use internally. + DiscreteKeys sorted_dkeys_; /** - * A discrete probabilistic factor optimized for sparsity. - * Uses sparse_table_ to store only the nonzero probabilities. - * Computes the assigned value for the key using the ordering which the - * nonzero probabilties are stored in. (lazy cartesian product) - * - * @ingroup discrete + * @brief Uses lazy cartesian product to find nth entry in the cartesian + * product of arrays in O(1) + * Example) + * v0 | v1 | val + * 0 | 0 | 10 + * 0 | 1 | 21 + * 1 | 0 | 32 + * 1 | 1 | 43 + * keyValueForIndex(v1, 2) = 0 + * @param target_key nth entry's key to find out its assigned value + * @param index nth entry in the sparse vector + * @return TableFactor */ - class GTSAM_EXPORT TableFactor : public DiscreteFactor { - protected: - std::map cardinalities_; /// Map of Keys and their cardinalities. - Eigen::SparseVector sparse_table_; /// SparseVector of nonzero probabilities. - - private: - std::map denominators_; /// Map of Keys and their denominators used in keyValueForIndex. - DiscreteKeys sorted_dkeys_; /// Sorted DiscreteKeys to use internally. - - /** - * @brief Uses lazy cartesian product to find nth entry in the cartesian product of arrays in O(1) - * Example) - * v0 | v1 | val - * 0 | 0 | 10 - * 0 | 1 | 21 - * 1 | 0 | 32 - * 1 | 1 | 43 - * keyValueForIndex(v1, 2) = 0 - * @param target_key nth entry's key to find out its assigned value - * @param index nth entry in the sparse vector - * @return TableFactor - */ - size_t keyValueForIndex(Key target_key, uint64_t index) const; + size_t keyValueForIndex(Key target_key, uint64_t index) const; - DiscreteKey discreteKey(size_t i) const { - return DiscreteKey(keys_[i], cardinalities_.at(keys_[i])); + /** + * @brief Return ith key in keys_ as a DiscreteKey + * @param i ith key in keys_ + * @return DiscreteKey + * */ + DiscreteKey discreteKey(size_t i) const { + return DiscreteKey(keys_[i], cardinalities_.at(keys_[i])); + } + + /// Convert probability table given as doubles to SparseVector. + static Eigen::SparseVector Convert(const std::vector& table); + + /// Convert probability table given as string to SparseVector. 
+  static Eigen::SparseVector<double> Convert(const std::string& table);
+
+ public:
+  // typedefs needed to play nice with gtsam
+  typedef TableFactor This;
+  typedef DiscreteFactor Base;  ///< Typedef to base class
+  typedef std::shared_ptr<TableFactor> shared_ptr;
+  typedef Eigen::SparseVector<double>::InnerIterator SparseIt;
+  typedef std::vector<std::pair<DiscreteValues, double>> AssignValList;
+  using Binary = std::function<double(const double, const double)>;
+
+ public:
+  /** The Real ring with addition and multiplication */
+  struct Ring {
+    static inline double zero() { return 0.0; }
+    static inline double one() { return 1.0; }
+    static inline double add(const double& a, const double& b) { return a + b; }
+    static inline double max(const double& a, const double& b) {
+      return std::max(a, b);
+    }
+    static inline double mul(const double& a, const double& b) { return a * b; }
+    static inline double div(const double& a, const double& b) {
+      return (a == 0 || b == 0) ?
0 : (a / b); } + static inline double id(const double& x) { return x; } + }; - /// Evaluate probability distribution, sugar. - double operator()(const DiscreteValues& values) const override; + /// @name Standard Constructors + /// @{ - /// Calculate error for DiscreteValues `x`, is -log(probability). - double error(const DiscreteValues& values) const; + /** Default constructor for I/O */ + TableFactor(); - /// multiply two TableFactors - TableFactor operator*(const TableFactor& f) const { - return apply(f, Ring::mul); - }; + /** Constructor from DiscreteKeys and TableFactor */ + TableFactor(const DiscreteKeys& keys, const TableFactor& potentials); - /// multiple with DecisionTreeFactor - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; + /** Constructor from sparse_table */ + TableFactor(const DiscreteKeys& keys, + const Eigen::SparseVector& table); - static double safe_div(const double& a, const double& b); + /** Constructor from doubles */ + TableFactor(const DiscreteKeys& keys, const std::vector& table) + : TableFactor(keys, Convert(table)) {} - size_t cardinality(Key j) const { return cardinalities_.at(j); } + /** Constructor from string */ + TableFactor(const DiscreteKeys& keys, const std::string& table) + : TableFactor(keys, Convert(table)) {} - /// divide by factor f (safely) - TableFactor operator/(const TableFactor& f) const { - return apply(f, safe_div); - } + /// Single-key specialization + template + TableFactor(const DiscreteKey& key, SOURCE table) + : TableFactor(DiscreteKeys{key}, table) {} - /// Convert into a decisiontree - DecisionTreeFactor toDecisionTreeFactor() const override; + /// Single-key specialization, with vector of doubles. + TableFactor(const DiscreteKey& key, const std::vector& row) + : TableFactor(DiscreteKeys{key}, row) {} - /// Generate TableFactor from TableFactor - // TableFactor toTableFactor() const override { return *this; } + /// @} + /// @name Testable + /// @{ - /// Create a TableFactor that is a subset of this TableFactor - TableFactor choose(const DiscreteValues assignments, - DiscreteKeys parent_keys) const; + /// equality + bool equals(const DiscreteFactor& other, double tol = 1e-9) const override; - /// Create new factor by summing all values with the same separator values - shared_ptr sum(size_t nrFrontals) const { - return combine(nrFrontals, Ring::add); - } + // print + void print( + const std::string& s = "TableFactor:\n", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; - /// Create new factor by summing all values with the same separator values - shared_ptr sum(const Ordering& keys) const { - return combine(keys, Ring::add); - } + // /// @} + // /// @name Standard Interface + // /// @{ - /// Create new factor by maximizing over all values with the same separator. - shared_ptr max(size_t nrFrontals) const { - return combine(nrFrontals, Ring::max); - } + /// Calculate probability for given values `x`, + /// is just look up in TableFactor. + double evaluate(const DiscreteValues& values) const { + return operator()(values); + } - /// Create new factor by maximizing over all values with the same separator. - shared_ptr max(const Ordering& keys) const { - return combine(keys, Ring::max); - } + /// Evaluate probability distribution, sugar. + double operator()(const DiscreteValues& values) const override; - /// @} - /// @name Advanced Interface - /// @{ + /// Calculate error for DiscreteValues `x`, is -log(probability). 
+ double error(const DiscreteValues& values) const; - /** - * Apply binary operator (*this) "op" f - * @param f the second argument for op - * @param op a binary operator that operates on TableFactor - */ - TableFactor apply(const TableFactor& f, Binary op) const; + /// multiply two TableFactors + TableFactor operator*(const TableFactor& f) const { + return apply(f, Ring::mul); + }; - /// Return keys in contract mode. - DiscreteKeys contractDkeys(const TableFactor& f) const; - - /// Return keys in free mode. - DiscreteKeys freeDkeys(const TableFactor& f) const; + /// multiple with DecisionTreeFactor + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; - /// Return union of DiscreteKeys in two factors. - DiscreteKeys unionDkeys(const TableFactor& f) const; + static double safe_div(const double& a, const double& b); - /// Create unique representation of union modes. - uint64_t unionRep(const DiscreteKeys& keys, - const DiscreteValues& assign, const uint64_t idx) const; - - /// Create a hash map of input factor with assignment of contract modes as - /// keys and vector of hashed assignment of free modes and value as values. - std::unordered_map createMap( + size_t cardinality(Key j) const { return cardinalities_.at(j); } + + /// divide by factor f (safely) + TableFactor operator/(const TableFactor& f) const { + return apply(f, safe_div); + } + + /// Convert into a decisiontree + DecisionTreeFactor toDecisionTreeFactor() const override; + + /// Generate TableFactor from TableFactor + // TableFactor toTableFactor() const override { return *this; } + + /// Create a TableFactor that is a subset of this TableFactor + TableFactor choose(const DiscreteValues assignments, + DiscreteKeys parent_keys) const; + + /// Create new factor by summing all values with the same separator values + shared_ptr sum(size_t nrFrontals) const { + return combine(nrFrontals, Ring::add); + } + + /// Create new factor by summing all values with the same separator values + shared_ptr sum(const Ordering& keys) const { + return combine(keys, Ring::add); + } + + /// Create new factor by maximizing over all values with the same separator. + shared_ptr max(size_t nrFrontals) const { + return combine(nrFrontals, Ring::max); + } + + /// Create new factor by maximizing over all values with the same separator. + shared_ptr max(const Ordering& keys) const { + return combine(keys, Ring::max); + } + + /// @} + /// @name Advanced Interface + /// @{ + + /** + * Apply binary operator (*this) "op" f + * @param f the second argument for op + * @param op a binary operator that operates on TableFactor + */ + TableFactor apply(const TableFactor& f, Binary op) const; + + /// Return keys in contract mode. + DiscreteKeys contractDkeys(const TableFactor& f) const; + + /// Return keys in free mode. + DiscreteKeys freeDkeys(const TableFactor& f) const; + + /// Return union of DiscreteKeys in two factors. + DiscreteKeys unionDkeys(const TableFactor& f) const; + + /// Create unique representation of union modes. + uint64_t unionRep(const DiscreteKeys& keys, const DiscreteValues& assign, + const uint64_t idx) const; + + /// Create a hash map of input factor with assignment of contract modes as + /// keys and vector of hashed assignment of free modes and value as values. 
+ std::unordered_map createMap( const DiscreteKeys& contract, const DiscreteKeys& free) const; - /// Create unique representation - uint64_t uniqueRep(const DiscreteKeys& keys, const uint64_t idx) const; - - /// Create unique representation with DiscreteValues - uint64_t uniqueRep(const DiscreteValues& assignments) const; + /// Create unique representation + uint64_t uniqueRep(const DiscreteKeys& keys, const uint64_t idx) const; - /// Find DiscreteValues for corresponding index. - DiscreteValues findAssignments(const uint64_t idx) const; - - /// Find value for corresponding DiscreteValues. - double findValue(const DiscreteValues& values) const; + /// Create unique representation with DiscreteValues + uint64_t uniqueRep(const DiscreteValues& assignments) const; - /** - * Combine frontal variables using binary operator "op" - * @param nrFrontals nr. of frontal to combine variables in this factor - * @param op a binary operator that operates on TableFactor - * @return shared pointer to newly created TableFactor - */ - shared_ptr combine(size_t nrFrontals, Binary op) const; + /// Find DiscreteValues for corresponding index. + DiscreteValues findAssignments(const uint64_t idx) const; - /** - * Combine frontal variables in an Ordering using binary operator "op" - * @param nrFrontals nr. of frontal to combine variables in this factor - * @param op a binary operator that operates on TableFactor - * @return shared pointer to newly created TableFactor - */ - shared_ptr combine(const Ordering& keys, Binary op) const; + /// Find value for corresponding DiscreteValues. + double findValue(const DiscreteValues& values) const; - /// Enumerate all values into a map from values to double. - std::vector> enumerate() const; + /** + * Combine frontal variables using binary operator "op" + * @param nrFrontals nr. of frontal to combine variables in this factor + * @param op a binary operator that operates on TableFactor + * @return shared pointer to newly created TableFactor + */ + shared_ptr combine(size_t nrFrontals, Binary op) const; - /// Return all the discrete keys associated with this factor. - DiscreteKeys discreteKeys() const; + /** + * Combine frontal variables in an Ordering using binary operator "op" + * @param nrFrontals nr. of frontal to combine variables in this factor + * @param op a binary operator that operates on TableFactor + * @return shared pointer to newly created TableFactor + */ + shared_ptr combine(const Ordering& keys, Binary op) const; - /** - * @brief Prune the decision tree of discrete variables. - * - * Pruning will set the values to be "pruned" to 0 indicating a 0 - * probability. An assignment is pruned if it is not in the top - * `maxNrAssignments` values. - * - * A violation can occur if there are more - * duplicate values than `maxNrAssignments`. A violation here is the need to - * un-prune the decision tree (e.g. all assignment values are 1.0). We could - * have another case where some subset of duplicates exist (e.g. for a tree - * with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is - * not a violation since the for `maxNrAssignments=5` the top values are (1, - * 0.8). - * - * @param maxNrAssignments The maximum number of assignments to keep. - * @return TableFactor - */ - TableFactor prune(size_t maxNrAssignments) const; + /// Enumerate all values into a map from values to double. + std::vector> enumerate() const; - /// @} - /// @name Wrapper support - /// @{ + /// Return all the discrete keys associated with this factor. 
+ DiscreteKeys discreteKeys() const; - /** - * @brief Render as markdown table - * - * @param keyFormatter GTSAM-style Key formatter. - * @param names optional, category names corresponding to choices. - * @return std::string a markdown string. - */ - std::string markdown(const KeyFormatter& keyFormatter = DefaultKeyFormatter, - const Names& names = {}) const override; + /** + * @brief Prune the decision tree of discrete variables. + * + * Pruning will set the values to be "pruned" to 0 indicating a 0 + * probability. An assignment is pruned if it is not in the top + * `maxNrAssignments` values. + * + * A violation can occur if there are more + * duplicate values than `maxNrAssignments`. A violation here is the need to + * un-prune the decision tree (e.g. all assignment values are 1.0). We could + * have another case where some subset of duplicates exist (e.g. for a tree + * with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is + * not a violation since the for `maxNrAssignments=5` the top values are (1, + * 0.8). + * + * @param maxNrAssignments The maximum number of assignments to keep. + * @return TableFactor + */ + TableFactor prune(size_t maxNrAssignments) const; - /** - * @brief Render as html table - * - * @param keyFormatter GTSAM-style Key formatter. - * @param names optional, category names corresponding to choices. - * @return std::string a html string. - */ - std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, - const Names& names = {}) const override; + /// @} + /// @name Wrapper support + /// @{ + + /** + * @brief Render as markdown table + * + * @param keyFormatter GTSAM-style Key formatter. + * @param names optional, category names corresponding to choices. + * @return std::string a markdown string. + */ + std::string markdown(const KeyFormatter& keyFormatter = DefaultKeyFormatter, + const Names& names = {}) const override; + + /** + * @brief Render as html table + * + * @param keyFormatter GTSAM-style Key formatter. + * @param names optional, category names corresponding to choices. + * @return std::string a html string. + */ + std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter, + const Names& names = {}) const override; /// @} /// @name HybridValues methods. @@ -325,7 +334,7 @@ namespace gtsam { double error(const HybridValues& values) const override; /// @} - }; +}; // traits template <> From 7295bdd542d8389a803ecf4bc90991826937aff2 Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Mon, 29 May 2023 01:29:18 +0900 Subject: [PATCH 32/78] added example for Convert function which converts vector into Eigen::SparseVector. --- gtsam/discrete/TableFactor.h | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index d73dc1c9d..87989bcff 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -81,6 +81,7 @@ class GTSAM_EXPORT TableFactor : public DiscreteFactor { } /// Convert probability table given as doubles to SparseVector. + /// Example) {0, 1, 1, 0, 0, 1, 0} -> values: {1, 1, 1}, indices: {1, 2, 5} static Eigen::SparseVector Convert(const std::vector& table); /// Convert probability table given as string to SparseVector. From 0a5a21bedca1afb4ad939c62134423527d757d4d Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Mon, 29 May 2023 01:34:04 +0900 Subject: [PATCH 33/78] deleted toTableFactor. 
--- gtsam/discrete/TableFactor.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index 87989bcff..1462180e0 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -190,9 +190,6 @@ class GTSAM_EXPORT TableFactor : public DiscreteFactor { /// Convert into a decisiontree DecisionTreeFactor toDecisionTreeFactor() const override; - /// Generate TableFactor from TableFactor - // TableFactor toTableFactor() const override { return *this; } - /// Create a TableFactor that is a subset of this TableFactor TableFactor choose(const DiscreteValues assignments, DiscreteKeys parent_keys) const; From 1e14e4e2a5d0e9065e52bf02b8235e9fe799682c Mon Sep 17 00:00:00 2001 From: Yoonwoo Kim Date: Mon, 29 May 2023 02:31:30 +0900 Subject: [PATCH 34/78] added comment for every test and formatted with Google style for testTableFactor.cpp. --- gtsam/discrete/tests/testTableFactor.cpp | 115 ++++++++++++----------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/gtsam/discrete/tests/testTableFactor.cpp b/gtsam/discrete/tests/testTableFactor.cpp index 4acde8167..3ad757347 100644 --- a/gtsam/discrete/tests/testTableFactor.cpp +++ b/gtsam/discrete/tests/testTableFactor.cpp @@ -19,11 +19,12 @@ #include #include #include -#include #include #include -#include +#include + #include +#include using namespace std; using namespace gtsam; @@ -31,7 +32,7 @@ using namespace gtsam; vector genArr(double dropout, size_t size) { random_device rd; mt19937 g(rd()); - vector dropoutmask(size); // Chance of 0 + vector dropoutmask(size); // Chance of 0 uniform_int_distribution<> dist(1, 9); auto gen = [&dist, &g]() { return dist(g); }; @@ -39,16 +40,15 @@ vector genArr(double dropout, size_t size) { fill_n(dropoutmask.begin(), dropoutmask.size() * (dropout), 0); shuffle(dropoutmask.begin(), dropoutmask.end(), g); - + return dropoutmask; } -map> - measureTime(DiscreteKeys keys1, DiscreteKeys keys2, size_t size) { +map> measureTime( + DiscreteKeys keys1, DiscreteKeys keys2, size_t size) { vector dropouts = {0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}; - map> - measured_times; - + map> measured_times; + for (auto dropout : dropouts) { vector arr1 = genArr(dropout, size); vector arr2 = genArr(dropout, size); @@ -61,13 +61,15 @@ map> auto tb_start = chrono::high_resolution_clock::now(); TableFactor actual = f1 * f2; auto tb_end = chrono::high_resolution_clock::now(); - auto tb_time_diff = chrono::duration_cast(tb_end - tb_start); + auto tb_time_diff = + chrono::duration_cast(tb_end - tb_start); // measure time DT auto dt_start = chrono::high_resolution_clock::now(); DecisionTreeFactor actual_dt = f1_dt * f2_dt; auto dt_end = chrono::high_resolution_clock::now(); - auto dt_time_diff = chrono::duration_cast(dt_end - dt_start); + auto dt_time_diff = + chrono::duration_cast(dt_end - dt_start); bool flag = true; for (auto assignmentVal : actual_dt.enumerate()) { @@ -75,7 +77,7 @@ map> if (flag) { std::cout << "something is wrong: " << std::endl; assignmentVal.first.print(); - std::cout << "dt: " << actual_dt(assignmentVal.first) << std::endl; + std::cout << "dt: " << actual_dt(assignmentVal.first) << std::endl; std::cout << "tb: " << actual(assignmentVal.first) << std::endl; break; } @@ -86,35 +88,35 @@ map> return measured_times; } -void printTime(map> measured_time) { +void printTime(map> + measured_time) { for (auto&& kv : measured_time) { - cout << "dropout: " << kv.first << " | TableFactor time: " - << kv.second.first.count() << " | 
DecisionTreeFactor time: " << kv.second.second.count() - << endl; + cout << "dropout: " << kv.first + << " | TableFactor time: " << kv.second.first.count() + << " | DecisionTreeFactor time: " << kv.second.second.count() << endl; } - } /* ************************************************************************* */ -TEST( TableFactor, constructors) -{ +// Check constructors for TableFactor. +TEST(TableFactor, constructors) { // Declare a bunch of keys - DiscreteKey X(0,2), Y(1,3), Z(2,2), A(3, 5); + DiscreteKey X(0, 2), Y(1, 3), Z(2, 2), A(3, 5); // Create factors TableFactor f_zeros(A, {0, 0, 0, 0, 1}); TableFactor f1(X, {2, 8}); TableFactor f2(X & Y, "2 5 3 6 4 7"); TableFactor f3(X & Y & Z, "2 5 3 6 4 7 25 55 35 65 45 75"); - EXPECT_LONGS_EQUAL(1,f1.size()); - EXPECT_LONGS_EQUAL(2,f2.size()); - EXPECT_LONGS_EQUAL(3,f3.size()); + EXPECT_LONGS_EQUAL(1, f1.size()); + EXPECT_LONGS_EQUAL(2, f2.size()); + EXPECT_LONGS_EQUAL(3, f3.size()); DiscreteValues values; - values[0] = 1; // x - values[1] = 2; // y - values[2] = 1; // z - values[3] = 4; // a + values[0] = 1; // x + values[1] = 2; // y + values[2] = 1; // z + values[3] = 4; // a EXPECT_DOUBLES_EQUAL(1, f_zeros(values), 1e-9); EXPECT_DOUBLES_EQUAL(8, f1(values), 1e-9); EXPECT_DOUBLES_EQUAL(7, f2(values), 1e-9); @@ -125,6 +127,7 @@ TEST( TableFactor, constructors) } /* ************************************************************************* */ +// Check multiplication between two TableFactors. TEST(TableFactor, multiplication) { DiscreteKey v0(0, 2), v1(1, 2), v2(2, 2); @@ -133,7 +136,7 @@ TEST(TableFactor, multiplication) { TableFactor f1(v0 & v1, "1 2 3 4"); DecisionTreeFactor expected(v0 & v1, "0.25 1.5 0.75 3"); CHECK(assert_equal(expected, static_cast(prior) * - f1.toDecisionTreeFactor())); + f1.toDecisionTreeFactor())); CHECK(assert_equal(expected, f1 * prior)); // Multiply two factors @@ -148,74 +151,75 @@ TEST(TableFactor, multiplication) { TableFactor actual_zeros = f_zeros1 * f_zeros2; TableFactor expected3(A & B & C, "0 0 0 0 0 0 0 10 0 0 0 15"); CHECK(assert_equal(expected3, actual_zeros)); - } /* ************************************************************************* */ +// Benchmark which compares runtime of multiplication of two TableFactors +// and two DecisionTreeFactors given sparsity from dense to 90% sparsity. 
TEST(TableFactor, benchmark) { -DiscreteKey A(0, 5), B(1, 2), C(2, 5), D(3, 2), E(4, 5), - F(5, 2), G(6, 3), H(7, 2), I(8, 5), J(9, 7), K(10, 2), L(11, 3); + DiscreteKey A(0, 5), B(1, 2), C(2, 5), D(3, 2), E(4, 5), F(5, 2), G(6, 3), + H(7, 2), I(8, 5), J(9, 7), K(10, 2), L(11, 3); // 100 DiscreteKeys one_1 = {A, B, C, D}; DiscreteKeys one_2 = {C, D, E, F}; - map> time_map_1 = - measureTime(one_1, one_2, 100); + map> time_map_1 = + measureTime(one_1, one_2, 100); printTime(time_map_1); // 200 DiscreteKeys two_1 = {A, B, C, D, F}; DiscreteKeys two_2 = {B, C, D, E, F}; map> time_map_2 = - measureTime(two_1, two_2, 200); + measureTime(two_1, two_2, 200); printTime(time_map_2); // 300 DiscreteKeys three_1 = {A, B, C, D, G}; DiscreteKeys three_2 = {C, D, E, F, G}; - map> time_map_3 = - measureTime(three_1, three_2, 300); + map> time_map_3 = + measureTime(three_1, three_2, 300); printTime(time_map_3); // 400 DiscreteKeys four_1 = {A, B, C, D, F, H}; DiscreteKeys four_2 = {B, C, D, E, F, H}; - map> time_map_4 = - measureTime(four_1, four_2, 400); + map> time_map_4 = + measureTime(four_1, four_2, 400); printTime(time_map_4); // 500 DiscreteKeys five_1 = {A, B, C, D, I}; DiscreteKeys five_2 = {C, D, E, F, I}; map> time_map_5 = - measureTime(five_1, five_2, 500); + measureTime(five_1, five_2, 500); printTime(time_map_5); // 600 DiscreteKeys six_1 = {A, B, C, D, F, G}; DiscreteKeys six_2 = {B, C, D, E, F, G}; - map> time_map_6 = - measureTime(six_1, six_2, 600); + map> time_map_6 = + measureTime(six_1, six_2, 600); printTime(time_map_6); // 700 DiscreteKeys seven_1 = {A, B, C, D, J}; DiscreteKeys seven_2 = {C, D, E, F, J}; - map> time_map_7 = - measureTime(seven_1, seven_2, 700); + map> time_map_7 = + measureTime(seven_1, seven_2, 700); printTime(time_map_7); // 800 DiscreteKeys eight_1 = {A, B, C, D, F, H, K}; DiscreteKeys eight_2 = {B, C, D, E, F, H, K}; - map> time_map_8 = - measureTime(eight_1, eight_2, 800); + map> time_map_8 = + measureTime(eight_1, eight_2, 800); printTime(time_map_8); // 900 DiscreteKeys nine_1 = {A, B, C, D, G, L}; DiscreteKeys nine_2 = {C, D, E, F, G, L}; map> time_map_9 = - measureTime(nine_1, nine_2, 900); + measureTime(nine_1, nine_2, 900); printTime(time_map_9); } /* ************************************************************************* */ -TEST( TableFactor, sum_max) -{ - DiscreteKey v0(0,3), v1(1,2); +// Check sum and max over frontals. +TEST(TableFactor, sum_max) { + DiscreteKey v0(0, 3), v1(1, 2); TableFactor f1(v0 & v1, "1 2 3 4 5 6"); TableFactor expected(v1, "9 12"); @@ -274,10 +278,9 @@ TEST(TableFactor, Prune) { "0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 " "0.0 0.0 0.99995287 1.0 1.0 1.0 1.0"); - TableFactor expected3( - D & C & B & A, - "0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 " - "0.999952870000 1.0 1.0 1.0 1.0"); + TableFactor expected3(D & C & B & A, + "0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 " + "0.999952870000 1.0 1.0 1.0 1.0"); maxNrAssignments = 5; auto pruned3 = factor.prune(maxNrAssignments); EXPECT(assert_equal(expected3, pruned3)); @@ -317,8 +320,7 @@ TEST(TableFactor, markdownWithValueFormatter) { "|Two|-|5|\n" "|Two|+|6|\n"; auto keyFormatter = [](Key key) { return key == 12 ? 
"A" : "B"; }; - TableFactor::Names names{{12, {"Zero", "One", "Two"}}, - {5, {"-", "+"}}}; + TableFactor::Names names{{12, {"Zero", "One", "Two"}}, {5, {"-", "+"}}}; string actual = f.markdown(keyFormatter, names); EXPECT(actual == expected); } @@ -345,8 +347,7 @@ TEST(TableFactor, htmlWithValueFormatter) { "\n" ""; auto keyFormatter = [](Key key) { return key == 12 ? "A" : "B"; }; - TableFactor::Names names{{12, {"Zero", "One", "Two"}}, - {5, {"-", "+"}}}; + TableFactor::Names names{{12, {"Zero", "One", "Two"}}, {5, {"-", "+"}}}; string actual = f.html(keyFormatter, names); EXPECT(actual == expected); } From 3eec88f60eaae0c1db1cecdab6d20d24a64c9030 Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Fri, 2 Jun 2023 15:44:51 -0700 Subject: [PATCH 35/78] Added rank threshold to triangulateLOST --- gtsam/geometry/triangulation.cpp | 57 +++--- gtsam/geometry/triangulation.h | 305 +++++++++++++++---------------- 2 files changed, 173 insertions(+), 189 deletions(-) diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index 06fb8fafe..2049ce384 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -25,9 +25,9 @@ namespace gtsam { Vector4 triangulateHomogeneousDLT( - const std::vector>& - projection_matrices, - const Point2Vector& measurements, double rank_tol) { + const std::vector>& projection_matrices, + const Point2Vector& measurements, + double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -56,9 +56,9 @@ Vector4 triangulateHomogeneousDLT( } Vector4 triangulateHomogeneousDLT( - const std::vector>& - projection_matrices, - const std::vector& measurements, double rank_tol) { + const std::vector>& projection_matrices, + const std::vector& measurements, + double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -68,9 +68,7 @@ Vector4 triangulateHomogeneousDLT( for (size_t i = 0; i < m; i++) { size_t row = i * 2; const Matrix34& projection = projection_matrices.at(i); - const Point3& p = - measurements.at(i) - .point3(); // to get access to x,y,z of the bearing vector + const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector // build system of equations A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0); @@ -85,7 +83,8 @@ Vector4 triangulateHomogeneousDLT( Point3 triangulateLOST(const std::vector& poses, const Point3Vector& calibratedMeasurements, - const SharedIsotropic& measurementNoise) { + const SharedIsotropic& measurementNoise, + double rank_tol) { size_t m = calibratedMeasurements.size(); assert(m == poses.size()); @@ -105,36 +104,39 @@ Point3 triangulateLOST(const std::vector& poses, const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]); // Note: Setting q_i = 1.0 gives same results as DLT. 
- const double q_i = wZi.cross(wZj).norm() / - (measurementNoise->sigma() * d_ij.cross(wZj).norm()); + const double q_i = wZi.cross(wZj).norm() / (measurementNoise->sigma() * d_ij.cross(wZj).norm()); - const Matrix23 coefficientMat = - q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * - wTi.rotation().matrix().transpose(); + const Matrix23 coefficientMat = q_i * + skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * + wTi.rotation().matrix().transpose(); A.block<2, 3>(2 * i, 0) << coefficientMat; b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation(); } - return A.colPivHouseholderQr().solve(b); + + Eigen::ColPivHouseholderQR A_Qr = A.colPivHouseholderQr(); + A_Qr.setThreshold(rank_tol); + + // if (A_Qr.rank() < 3) throw(TriangulationUnderconstrainedException()); + + return A_Qr.solve(b); } Point3 triangulateDLT( - const std::vector>& - projection_matrices, - const Point2Vector& measurements, double rank_tol) { - Vector4 v = - triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); + const std::vector>& projection_matrices, + const Point2Vector& measurements, + double rank_tol) { + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } Point3 triangulateDLT( - const std::vector>& - projection_matrices, - const std::vector& measurements, double rank_tol) { + const std::vector>& projection_matrices, + const std::vector& measurements, + double rank_tol) { // contrary to previous triangulateDLT, this is now taking Unit3 inputs - Vector4 v = - triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } @@ -146,8 +148,7 @@ Point3 triangulateDLT( * @param landmarkKey to refer to landmark * @return refined Point3 */ -Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, - Key landmarkKey) { +Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, Key landmarkKey) { // Maybe we should consider Gauss-Newton? 
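// (One possible answer: Gauss-Newton would be cheaper per iteration, but the
// Levenberg-Marquardt damping used below is more robust when the point is
// weakly constrained, e.g. for near-parallel rays, so LM is kept here.)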
LevenbergMarquardtParams params; params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA; diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 7e58cee2d..61ca5588a 100644 --- a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -20,39 +20,38 @@ #pragma once -#include "gtsam/geometry/Point3.h" #include +#include #include #include #include -#include #include #include -#include #include +#include #include #include #include +#include "gtsam/geometry/Point3.h" #include namespace gtsam { /// Exception thrown by triangulateDLT when SVD returns rank < 3 -class GTSAM_EXPORT TriangulationUnderconstrainedException: public std::runtime_error { -public: - TriangulationUnderconstrainedException() : - std::runtime_error("Triangulation Underconstrained Exception.") { - } +class GTSAM_EXPORT TriangulationUnderconstrainedException : public std::runtime_error { + public: + TriangulationUnderconstrainedException() + : std::runtime_error("Triangulation Underconstrained Exception.") {} }; /// Exception thrown by triangulateDLT when landmark is behind one or more of the cameras -class GTSAM_EXPORT TriangulationCheiralityException: public std::runtime_error { -public: - TriangulationCheiralityException() : - std::runtime_error( - "Triangulation Cheirality Exception: The resulting landmark is behind one or more cameras.") { - } +class GTSAM_EXPORT TriangulationCheiralityException : public std::runtime_error { + public: + TriangulationCheiralityException() + : std::runtime_error( + "Triangulation Cheirality Exception: The resulting landmark is behind one or more " + "cameras.") {} }; /** @@ -64,11 +63,13 @@ public: */ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( const std::vector>& projection_matrices, - const Point2Vector& measurements, double rank_tol = 1e-9); + const Point2Vector& measurements, + double rank_tol = 1e-9); /** * Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors - * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman) + * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and + * Zisserman) * @param projection_matrices Projection matrices (K*P^-1) * @param measurements Unit3 bearing measurements * @param rank_tol SVD rank tolerance @@ -76,7 +77,8 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( */ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( const std::vector>& projection_matrices, - const std::vector& measurements, double rank_tol = 1e-9); + const std::vector& measurements, + double rank_tol = 1e-9); /** * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312 @@ -85,18 +87,18 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( * @param rank_tol SVD rank tolerance * @return Triangulated Point3 */ -GTSAM_EXPORT Point3 triangulateDLT( - const std::vector>& projection_matrices, - const Point2Vector& measurements, - double rank_tol = 1e-9); +GTSAM_EXPORT Point3 +triangulateDLT(const std::vector>& projection_matrices, + const Point2Vector& measurements, + double rank_tol = 1e-9); /** * overload of previous function to work with Unit3 (projected to canonical camera) */ -GTSAM_EXPORT Point3 triangulateDLT( - const std::vector>& projection_matrices, - const std::vector& measurements, - double rank_tol = 1e-9); +GTSAM_EXPORT Point3 +triangulateDLT(const std::vector>& projection_matrices, + const std::vector& measurements, + double rank_tol = 1e-9); /** * @brief Triangulation using the LOST (Linear Optimal Sine 
Triangulation) @@ -110,7 +112,8 @@ GTSAM_EXPORT Point3 triangulateDLT( */ GTSAM_EXPORT Point3 triangulateLOST(const std::vector& poses, const Point3Vector& calibratedMeasurements, - const SharedIsotropic& measurementNoise); + const SharedIsotropic& measurementNoise, + double rank_tol = 1e-9); /** * Create a factor graph with projection factors from poses and one calibration @@ -121,20 +124,22 @@ GTSAM_EXPORT Point3 triangulateLOST(const std::vector& poses, * @param initialEstimate * @return graph and initial values */ -template +template std::pair triangulationGraph( - const std::vector& poses, std::shared_ptr sharedCal, - const Point2Vector& measurements, Key landmarkKey, + const std::vector& poses, + std::shared_ptr sharedCal, + const Point2Vector& measurements, + Key landmarkKey, const Point3& initialEstimate, const SharedNoiseModel& model = noiseModel::Unit::Create(2)) { Values values; - values.insert(landmarkKey, initialEstimate); // Initial landmark value + values.insert(landmarkKey, initialEstimate); // Initial landmark value NonlinearFactorGraph graph; for (size_t i = 0; i < measurements.size(); i++) { const Pose3& pose_i = poses[i]; typedef PinholePose Camera; Camera camera_i(pose_i, sharedCal); - graph.emplace_shared > // + graph.emplace_shared> // (camera_i, measurements[i], model, landmarkKey); } return {graph, values}; @@ -149,21 +154,22 @@ std::pair triangulationGraph( * @param initialEstimate * @return graph and initial values */ -template +template std::pair triangulationGraph( const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements, Key landmarkKey, + const typename CAMERA::MeasurementVector& measurements, + Key landmarkKey, const Point3& initialEstimate, const SharedNoiseModel& model = nullptr) { Values values; - values.insert(landmarkKey, initialEstimate); // Initial landmark value + values.insert(landmarkKey, initialEstimate); // Initial landmark value NonlinearFactorGraph graph; - static SharedNoiseModel unit(noiseModel::Unit::Create( - traits::dimension)); + static SharedNoiseModel unit( + noiseModel::Unit::Create(traits::dimension)); for (size_t i = 0; i < measurements.size(); i++) { const CAMERA& camera_i = cameras[i]; - graph.emplace_shared > // - (camera_i, measurements[i], model? model : unit, landmarkKey); + graph.emplace_shared> // + (camera_i, measurements[i], model ? 
model : unit, landmarkKey); } return {graph, values}; } @@ -176,7 +182,8 @@ std::pair triangulationGraph( * @return refined Point3 */ GTSAM_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, - const Values& values, Key landmarkKey); + const Values& values, + Key landmarkKey); /** * Given an initial estimate , refine a point using measurements in several cameras @@ -186,14 +193,14 @@ GTSAM_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, * @param initialEstimate * @return refined Point3 */ -template +template Point3 triangulateNonlinear(const std::vector& poses, - std::shared_ptr sharedCal, - const Point2Vector& measurements, const Point3& initialEstimate, - const SharedNoiseModel& model = nullptr) { - + std::shared_ptr sharedCal, + const Point2Vector& measurements, + const Point3& initialEstimate, + const SharedNoiseModel& model = nullptr) { // Create a factor graph and initial values - const auto [graph, values] = triangulationGraph // + const auto [graph, values] = triangulationGraph // (poses, sharedCal, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); @@ -206,33 +213,32 @@ Point3 triangulateNonlinear(const std::vector& poses, * @param initialEstimate * @return refined Point3 */ -template -Point3 triangulateNonlinear( - const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements, const Point3& initialEstimate, - const SharedNoiseModel& model = nullptr) { - +template +Point3 triangulateNonlinear(const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements, + const Point3& initialEstimate, + const SharedNoiseModel& model = nullptr) { // Create a factor graph and initial values - const auto [graph, values] = triangulationGraph // + const auto [graph, values] = triangulationGraph // (cameras, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); } -template -std::vector> -projectionMatricesFromCameras(const CameraSet &cameras) { +template +std::vector> projectionMatricesFromCameras( + const CameraSet& cameras) { std::vector> projection_matrices; - for (const CAMERA &camera: cameras) { + for (const CAMERA& camera : cameras) { projection_matrices.push_back(camera.cameraProjectionMatrix()); } return projection_matrices; } // overload, assuming pinholePose -template +template std::vector> projectionMatricesFromPoses( - const std::vector &poses, std::shared_ptr sharedCal) { + const std::vector& poses, std::shared_ptr sharedCal) { std::vector> projection_matrices; for (size_t i = 0; i < poses.size(); i++) { PinholePose camera(poses.at(i), sharedCal); @@ -257,9 +263,9 @@ Cal3_S2 createPinholeCalibration(const CALIBRATION& cal) { /** Internal undistortMeasurement to be used by undistortMeasurement and * undistortMeasurements */ template -MEASUREMENT undistortMeasurementInternal( - const CALIBRATION& cal, const MEASUREMENT& measurement, - std::optional pinholeCal = {}) { +MEASUREMENT undistortMeasurementInternal(const CALIBRATION& cal, + const MEASUREMENT& measurement, + std::optional pinholeCal = {}) { if (!pinholeCal) { pinholeCal = createPinholeCalibration(cal); } @@ -278,13 +284,13 @@ MEASUREMENT undistortMeasurementInternal( * @return measurements with the effect of the distortion of sharedCal removed. 
*/ template -Point2Vector undistortMeasurements(const CALIBRATION& cal, - const Point2Vector& measurements) { +Point2Vector undistortMeasurements(const CALIBRATION& cal, const Point2Vector& measurements) { Cal3_S2 pinholeCalibration = createPinholeCalibration(cal); Point2Vector undistortedMeasurements; // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. - std::transform(measurements.begin(), measurements.end(), + std::transform(measurements.begin(), + measurements.end(), std::back_inserter(undistortedMeasurements), [&cal, &pinholeCalibration](const Point2& measurement) { return undistortMeasurementInternal( @@ -295,8 +301,7 @@ Point2Vector undistortMeasurements(const CALIBRATION& cal, /** Specialization for Cal3_S2 as it doesn't need to be undistorted. */ template <> -inline Point2Vector undistortMeasurements(const Cal3_S2& cal, - const Point2Vector& measurements) { +inline Point2Vector undistortMeasurements(const Cal3_S2& cal, const Point2Vector& measurements) { return measurements; } @@ -313,17 +318,15 @@ inline Point2Vector undistortMeasurements(const Cal3_S2& cal, */ template typename CAMERA::MeasurementVector undistortMeasurements( - const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements) { + const CameraSet& cameras, const typename CAMERA::MeasurementVector& measurements) { const size_t nrMeasurements = measurements.size(); assert(nrMeasurements == cameras.size()); typename CAMERA::MeasurementVector undistortedMeasurements(nrMeasurements); for (size_t ii = 0; ii < nrMeasurements; ++ii) { // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. - undistortedMeasurements[ii] = - undistortMeasurementInternal( - cameras[ii].calibration(), measurements[ii]); + undistortedMeasurements[ii] = undistortMeasurementInternal( + cameras[ii].calibration(), measurements[ii]); } return undistortedMeasurements; } @@ -353,12 +356,13 @@ inline SphericalCamera::MeasurementVector undistortMeasurements( * @return homogeneous measurements in image plane */ template -inline Point3Vector calibrateMeasurementsShared( - const CALIBRATION& cal, const Point2Vector& measurements) { +inline Point3Vector calibrateMeasurementsShared(const CALIBRATION& cal, + const Point2Vector& measurements) { Point3Vector calibratedMeasurements; // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. - std::transform(measurements.begin(), measurements.end(), + std::transform(measurements.begin(), + measurements.end(), std::back_inserter(calibratedMeasurements), [&cal](const Point2& measurement) { Point3 p; @@ -377,25 +381,21 @@ inline Point3Vector calibrateMeasurementsShared( * @return homogeneous measurements in image plane */ template -inline Point3Vector calibrateMeasurements( - const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements) { +inline Point3Vector calibrateMeasurements(const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements) { const size_t nrMeasurements = measurements.size(); assert(nrMeasurements == cameras.size()); Point3Vector calibratedMeasurements(nrMeasurements); for (size_t ii = 0; ii < nrMeasurements; ++ii) { - calibratedMeasurements[ii] - << cameras[ii].calibration().calibrate(measurements[ii]), - 1.0; + calibratedMeasurements[ii] << cameras[ii].calibration().calibrate(measurements[ii]), 1.0; } return calibratedMeasurements; } /** Specialize for SphericalCamera to do nothing. 
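 * The Unit3 measurements are already calibrated bearing vectors, so they are
 * simply copied out as Point3 values with no undistortion step.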
*/ template -inline Point3Vector calibrateMeasurements( - const CameraSet& cameras, - const SphericalCamera::MeasurementVector& measurements) { +inline Point3Vector calibrateMeasurements(const CameraSet& cameras, + const SphericalCamera::MeasurementVector& measurements) { Point3Vector calibratedMeasurements(measurements.size()); for (size_t ii = 0; ii < measurements.size(); ++ii) { calibratedMeasurements[ii] << measurements[ii].point3(); @@ -420,7 +420,8 @@ template Point3 triangulatePoint3(const std::vector& poses, std::shared_ptr sharedCal, const Point2Vector& measurements, - double rank_tol = 1e-9, bool optimize = false, + double rank_tol = 1e-9, + bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { assert(poses.size() == measurements.size()); @@ -432,24 +433,21 @@ Point3 triangulatePoint3(const std::vector& poses, // Reduce input noise model to an isotropic noise model using the mean of // the diagonal. const double measurementSigma = model ? model->sigmas().mean() : 1e-4; - SharedIsotropic measurementNoise = - noiseModel::Isotropic::Sigma(2, measurementSigma); + SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, measurementSigma); // calibrate the measurements to obtain homogenous coordinates in image // plane. auto calibratedMeasurements = calibrateMeasurementsShared(*sharedCal, measurements); - point = triangulateLOST(poses, calibratedMeasurements, measurementNoise); + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, rank_tol); } else { // construct projection matrices from poses & calibration auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal); // Undistort the measurements, leaving only the pinhole elements in effect. - auto undistortedMeasurements = - undistortMeasurements(*sharedCal, measurements); + auto undistortedMeasurements = undistortMeasurements(*sharedCal, measurements); - point = - triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); } // Then refine using non-linear optimization @@ -485,7 +483,8 @@ Point3 triangulatePoint3(const std::vector& poses, template Point3 triangulatePoint3(const CameraSet& cameras, const typename CAMERA::MeasurementVector& measurements, - double rank_tol = 1e-9, bool optimize = false, + double rank_tol = 1e-9, + bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { size_t m = cameras.size(); @@ -499,8 +498,7 @@ Point3 triangulatePoint3(const CameraSet& cameras, // Reduce input noise model to an isotropic noise model using the mean of // the diagonal. const double measurementSigma = model ? model->sigmas().mean() : 1e-4; - SharedIsotropic measurementNoise = - noiseModel::Isotropic::Sigma(2, measurementSigma); + SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, measurementSigma); // construct poses from cameras. std::vector poses; @@ -509,20 +507,17 @@ Point3 triangulatePoint3(const CameraSet& cameras, // calibrate the measurements to obtain homogenous coordinates in image // plane. 
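// (rank_tol is now also forwarded to triangulateLOST, where it is used as the
// ColPivHouseholderQR rank threshold; see triangulation.cpp above.)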
- auto calibratedMeasurements = - calibrateMeasurements(cameras, measurements); + auto calibratedMeasurements = calibrateMeasurements(cameras, measurements); - point = triangulateLOST(poses, calibratedMeasurements, measurementNoise); + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, rank_tol); } else { // construct projection matrices from poses & calibration auto projection_matrices = projectionMatricesFromCameras(cameras); // Undistort the measurements, leaving only the pinhole elements in effect. - auto undistortedMeasurements = - undistortMeasurements(cameras, measurements); + auto undistortedMeasurements = undistortMeasurements(cameras, measurements); - point = - triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); } // Then refine using non-linear optimization @@ -545,7 +540,8 @@ Point3 triangulatePoint3(const CameraSet& cameras, template Point3 triangulatePoint3(const CameraSet>& cameras, const Point2Vector& measurements, - double rank_tol = 1e-9, bool optimize = false, + double rank_tol = 1e-9, + bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { return triangulatePoint3> // @@ -553,16 +549,16 @@ Point3 triangulatePoint3(const CameraSet>& cameras, } struct GTSAM_EXPORT TriangulationParameters { - - double rankTolerance; ///< threshold to decide whether triangulation is result.degenerate - ///< (the rank is the number of singular values of the triangulation matrix which are larger than rankTolerance) - bool enableEPI; ///< if set to true, will refine triangulation using LM + double rankTolerance; ///< threshold to decide whether triangulation is result.degenerate + ///< (the rank is the number of singular values of the triangulation matrix which are larger than + ///< rankTolerance) + bool enableEPI; ///< if set to true, will refine triangulation using LM /** * if the landmark is triangulated at distance larger than this, * result is flagged as degenerate. 
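   * A non-positive value (the default is -1) disables this check.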
*/ - double landmarkDistanceThreshold; // + double landmarkDistanceThreshold; // /** * If this is nonnegative the we will check if the average reprojection error @@ -571,7 +567,7 @@ struct GTSAM_EXPORT TriangulationParameters { */ double dynamicOutlierRejectionThreshold; - SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation + SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation /** * Constructor @@ -583,39 +579,36 @@ struct GTSAM_EXPORT TriangulationParameters { * */ TriangulationParameters(const double _rankTolerance = 1.0, - const bool _enableEPI = false, double _landmarkDistanceThreshold = -1, - double _dynamicOutlierRejectionThreshold = -1, - const SharedNoiseModel& _noiseModel = nullptr) : - rankTolerance(_rankTolerance), enableEPI(_enableEPI), // - landmarkDistanceThreshold(_landmarkDistanceThreshold), // - dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold), - noiseModel(_noiseModel){ - } + const bool _enableEPI = false, + double _landmarkDistanceThreshold = -1, + double _dynamicOutlierRejectionThreshold = -1, + const SharedNoiseModel& _noiseModel = nullptr) + : rankTolerance(_rankTolerance), + enableEPI(_enableEPI), // + landmarkDistanceThreshold(_landmarkDistanceThreshold), // + dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold), + noiseModel(_noiseModel) {} // stream to output - friend std::ostream &operator<<(std::ostream &os, - const TriangulationParameters& p) { + friend std::ostream& operator<<(std::ostream& os, const TriangulationParameters& p) { os << "rankTolerance = " << p.rankTolerance << std::endl; os << "enableEPI = " << p.enableEPI << std::endl; - os << "landmarkDistanceThreshold = " << p.landmarkDistanceThreshold - << std::endl; - os << "dynamicOutlierRejectionThreshold = " - << p.dynamicOutlierRejectionThreshold << std::endl; + os << "landmarkDistanceThreshold = " << p.landmarkDistanceThreshold << std::endl; + os << "dynamicOutlierRejectionThreshold = " << p.dynamicOutlierRejectionThreshold << std::endl; os << "noise model" << std::endl; return os; } -private: - + private: #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /// /// Serialization function friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int version) { - ar & BOOST_SERIALIZATION_NVP(rankTolerance); - ar & BOOST_SERIALIZATION_NVP(enableEPI); - ar & BOOST_SERIALIZATION_NVP(landmarkDistanceThreshold); - ar & BOOST_SERIALIZATION_NVP(dynamicOutlierRejectionThreshold); + template + void serialize(ARCHIVE& ar, const unsigned int version) { + ar& BOOST_SERIALIZATION_NVP(rankTolerance); + ar& BOOST_SERIALIZATION_NVP(enableEPI); + ar& BOOST_SERIALIZATION_NVP(landmarkDistanceThreshold); + ar& BOOST_SERIALIZATION_NVP(dynamicOutlierRejectionThreshold); } #endif }; @@ -642,16 +635,10 @@ class TriangulationResult : public std::optional { * Constructor */ TriangulationResult(const Point3& p) : status(VALID) { emplace(p); } - static TriangulationResult Degenerate() { - return TriangulationResult(DEGENERATE); - } + static TriangulationResult Degenerate() { return TriangulationResult(DEGENERATE); } static TriangulationResult Outlier() { return TriangulationResult(OUTLIER); } - static TriangulationResult FarPoint() { - return TriangulationResult(FAR_POINT); - } - static TriangulationResult BehindCamera() { - return TriangulationResult(BEHIND_CAMERA); - } + static TriangulationResult FarPoint() { return TriangulationResult(FAR_POINT); } + static TriangulationResult BehindCamera() { return 
TriangulationResult(BEHIND_CAMERA); } bool valid() const { return status == VALID; } bool degenerate() const { return status == DEGENERATE; } bool outlier() const { return status == OUTLIER; } @@ -662,8 +649,7 @@ class TriangulationResult : public std::optional { return value(); } // stream to output - friend std::ostream& operator<<(std::ostream& os, - const TriangulationResult& result) { + friend std::ostream& operator<<(std::ostream& os, const TriangulationResult& result) { if (result) os << "point = " << *result << std::endl; else @@ -683,11 +669,10 @@ class TriangulationResult : public std::optional { }; /// triangulateSafe: extensive checking of the outcome -template +template TriangulationResult triangulateSafe(const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measured, - const TriangulationParameters& params) { - + const typename CAMERA::MeasurementVector& measured, + const TriangulationParameters& params) { size_t m = cameras.size(); // if we have a single pose the corresponding factor is uninformative @@ -696,25 +681,22 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, else // We triangulate the 3D position of the landmark try { - Point3 point = - triangulatePoint3(cameras, measured, params.rankTolerance, - params.enableEPI, params.noiseModel); + Point3 point = triangulatePoint3( + cameras, measured, params.rankTolerance, params.enableEPI, params.noiseModel); // Check landmark distance and re-projection errors to avoid outliers size_t i = 0; double maxReprojError = 0.0; - for(const CAMERA& camera: cameras) { + for (const CAMERA& camera : cameras) { const Pose3& pose = camera.pose(); - if (params.landmarkDistanceThreshold > 0 - && distance3(pose.translation(), point) - > params.landmarkDistanceThreshold) + if (params.landmarkDistanceThreshold > 0 && + distance3(pose.translation(), point) > params.landmarkDistanceThreshold) return TriangulationResult::FarPoint(); #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION // verify that the triangulated point lies in front of all cameras // Only needed if this was not yet handled by exception const Point3& p_local = pose.transformTo(point); - if (p_local.z() <= 0) - return TriangulationResult::BehindCamera(); + if (p_local.z() <= 0) return TriangulationResult::BehindCamera(); #endif // Check reprojection error if (params.dynamicOutlierRejectionThreshold > 0) { @@ -725,19 +707,21 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, i += 1; } // Flag as degenerate if average reprojection error is too large - if (params.dynamicOutlierRejectionThreshold > 0 - && maxReprojError > params.dynamicOutlierRejectionThreshold) + if (params.dynamicOutlierRejectionThreshold > 0 && + maxReprojError > params.dynamicOutlierRejectionThreshold) return TriangulationResult::Outlier(); // all good! 
return TriangulationResult(point); } catch (TriangulationUnderconstrainedException&) { // This exception is thrown if - // 1) There is a single pose for triangulation - this should not happen because we checked the number of poses before - // 2) The rank of the matrix used for triangulation is < 3: rotation-only, parallel cameras (or motion towards the landmark) + // 1) There is a single pose for triangulation - this should not happen because we checked the + // number of poses before 2) The rank of the matrix used for triangulation is < 3: + // rotation-only, parallel cameras (or motion towards the landmark) return TriangulationResult::Degenerate(); } catch (TriangulationCheiralityException&) { - // point is behind one of the cameras: can be the case of close-to-parallel cameras or may depend on outliers + // point is behind one of the cameras: can be the case of close-to-parallel cameras or may + // depend on outliers return TriangulationResult::BehindCamera(); } } @@ -749,5 +733,4 @@ using CameraSetCal3DS2 = CameraSet>; using CameraSetCal3Fisheye = CameraSet>; using CameraSetCal3Unified = CameraSet>; using CameraSetSpherical = CameraSet; -} // \namespace gtsam - +} // namespace gtsam From b1bce79e957fce2e322fc64696c190443a91457a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Jun 2023 13:29:21 -0400 Subject: [PATCH 36/78] support Apple silicon --- wrap/cmake/MatlabWrap.cmake | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/wrap/cmake/MatlabWrap.cmake b/wrap/cmake/MatlabWrap.cmake index c45d8c050..55b7cdb99 100644 --- a/wrap/cmake/MatlabWrap.cmake +++ b/wrap/cmake/MatlabWrap.cmake @@ -105,7 +105,12 @@ function(wrap_library_internal interfaceHeader moduleName linkLibraries extraInc set(mexModuleExt mexglx) endif() elseif(APPLE) - set(mexModuleExt mexmaci64) + check_cxx_compiler_flag("-arch arm64" arm64Supported) + if (arm64Supported) + set(mexModuleExt mexmaca64) + else() + set(mexModuleExt mexmaci64) + endif() elseif(MSVC) if(CMAKE_CL_64) set(mexModuleExt mexw64) @@ -299,7 +304,12 @@ function(wrap_library_internal interfaceHeader moduleName linkLibraries extraInc APPEND PROPERTY COMPILE_FLAGS "/bigobj") elseif(APPLE) - set(mxLibPath "${MATLAB_ROOT}/bin/maci64") + check_cxx_compiler_flag("-arch arm64" arm64Supported) + if (arm64Supported) + set(mxLibPath "${MATLAB_ROOT}/bin/maca64") + else() + set(mxLibPath "${MATLAB_ROOT}/bin/maci64") + endif() target_link_libraries( ${moduleName}_matlab_wrapper "${mxLibPath}/libmex.dylib" "${mxLibPath}/libmx.dylib" "${mxLibPath}/libmat.dylib") @@ -367,7 +377,12 @@ function(check_conflicting_libraries_internal libraries) if(UNIX) # Set path for matlab's built-in libraries if(APPLE) - set(mxLibPath "${MATLAB_ROOT}/bin/maci64") + check_cxx_compiler_flag("-arch arm64" arm64Supported) + if (arm64Supported) + set(mxLibPath "${MATLAB_ROOT}/bin/maca64") + else() + set(mxLibPath "${MATLAB_ROOT}/bin/maci64") + endif() else() if(CMAKE_CL_64) set(mxLibPath "${MATLAB_ROOT}/bin/glnxa64") From 2d48dd06081cdb7c4f0882b6e1b5f166a9ee71da Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Jun 2023 15:08:06 -0400 Subject: [PATCH 37/78] memory sanitizer flag in CMake --- cmake/HandleGeneralOptions.cmake | 3 ++- cmake/HandleGlobalBuildFlags.cmake | 7 +++++++ cmake/HandlePrintConfiguration.cmake | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake index 4a4f1a36e..9ebb07331 100644 --- a/cmake/HandleGeneralOptions.cmake +++ 
b/cmake/HandleGeneralOptions.cmake @@ -19,7 +19,8 @@ option(GTSAM_FORCE_STATIC_LIB "Force gtsam to be a static library, option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) -option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) +option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) +option(GTSAM_ENABLE_MEMORY_SANITIZER "Enable/Disable memory sanitizer" OFF) option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) diff --git a/cmake/HandleGlobalBuildFlags.cmake b/cmake/HandleGlobalBuildFlags.cmake index cb48f875b..eba6645d7 100644 --- a/cmake/HandleGlobalBuildFlags.cmake +++ b/cmake/HandleGlobalBuildFlags.cmake @@ -50,3 +50,10 @@ if(GTSAM_ENABLE_CONSISTENCY_CHECKS) # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) endif() + +if(GTSAM_ENABLE_MEMORY_SANITIZER) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fsanitize=leak -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fsanitize=leak -g") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address -fsanitize=leak") + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=address -fsanitize=leak") +endif() diff --git a/cmake/HandlePrintConfiguration.cmake b/cmake/HandlePrintConfiguration.cmake index b17d522d9..c5c3920cb 100644 --- a/cmake/HandlePrintConfiguration.cmake +++ b/cmake/HandlePrintConfiguration.cmake @@ -87,6 +87,7 @@ print_config("CPack Generator" "${CPACK_GENERATOR}") message(STATUS "GTSAM flags ") print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ENABLE_MEMORY_SANITIZER} "Build with Memory Sanitizer ") print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V43} "Allow features deprecated in GTSAM 4.3") From 82480fe2383be600069911523e954da6a10f3912 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Jun 2023 15:39:01 -0400 Subject: [PATCH 38/78] added more tests for basis factors --- gtsam/basis/tests/testBasisFactors.cpp | 2 + gtsam/basis/tests/testChebyshev2.cpp | 55 ++++++++++++++++++++++---- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/gtsam/basis/tests/testBasisFactors.cpp b/gtsam/basis/tests/testBasisFactors.cpp index 18a389da5..703830b8d 100644 --- a/gtsam/basis/tests/testBasisFactors.cpp +++ b/gtsam/basis/tests/testBasisFactors.cpp @@ -170,6 +170,8 @@ TEST(BasisFactors, ManifoldEvaluationFactor) { LevenbergMarquardtOptimizer(graph, initial, parameters).optimize(); EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9); 
+ // Check Jacobians + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, initial, 1e-7, 1e-5); } //****************************************************************************** diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index 56a79a38a..e2f01e8af 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -17,14 +17,15 @@ * methods */ -#include +#include +#include #include #include #include +#include #include -#include -#include +#include #include using namespace std; @@ -119,8 +120,9 @@ TEST(Chebyshev2, InterpolateVector) { EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative - std::function)> f = std::bind( - &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, std::placeholders::_1, nullptr); + std::function)> f = + std::bind(&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, + std::placeholders::_1, nullptr); Matrix numericalH = numericalDerivative11, 2 * N>(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); @@ -138,10 +140,49 @@ TEST(Chebyshev2, InterpolatePose2) { Vector xi(3); xi << t, 0, 0.1; + Eigen::Matrix actualH(3, 3 * N); + Chebyshev2::ManifoldEvaluationFunctor fx(N, t, a, b); // We use xi as canonical coordinates via exponential map Pose2 expected = Pose2::ChartAtOrigin::Retract(xi); - EXPECT(assert_equal(expected, fx(X))); + EXPECT(assert_equal(expected, fx(X, actualH))); + + // Check derivative + std::function)> f = + std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, + std::placeholders::_1, nullptr); + Matrix numericalH = + numericalDerivative11, 3 * N>(f, X); + EXPECT(assert_equal(numericalH, actualH, 1e-9)); +} + +//****************************************************************************** +// Interpolating poses using the exponential map +TEST(Chebyshev2, InterpolatePose3) { + double a = 10, b = 100; + double t = Chebyshev2::Points(N, a, b)(11); + + Rot3 R = Rot3::Ypr(-2.21366492e-05, -9.35353636e-03, -5.90463598e-04); + Pose3 pose(R, Point3(1, 2, 3)); + + Vector6 xi = Pose3::ChartAtOrigin::Local(pose); + Eigen::Matrix actualH(6, 6 * N); + + ParameterMatrix<6> X(N); + X.col(11) = xi; + + Chebyshev2::ManifoldEvaluationFunctor fx(N, t, a, b); + // We use xi as canonical coordinates via exponential map + Pose3 expected = Pose3::ChartAtOrigin::Retract(xi); + EXPECT(assert_equal(expected, fx(X, actualH))); + + // Check derivative + std::function)> f = + std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, + std::placeholders::_1, nullptr); + Matrix numericalH = + numericalDerivative11, 6 * N>(f, X); + EXPECT(assert_equal(numericalH, actualH, 1e-8)); } //****************************************************************************** @@ -149,7 +190,7 @@ TEST(Chebyshev2, Decomposition) { // Create example sequence Sequence sequence; for (size_t i = 0; i < 16; i++) { - double x = (1.0/ 16)*i - 0.99, y = x; + double x = (1.0 / 16) * i - 0.99, y = x; sequence[x] = y; } From 57578a4793daecb248f3d0118ed8801c130cc70e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Jun 2023 15:42:24 -0400 Subject: [PATCH 39/78] Wrap ManifoldEvaluationFactor for both Rot3 and Pose3 --- gtsam/basis/basis.i | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gtsam/basis/basis.i b/gtsam/basis/basis.i index 0cd87ba65..5ff4e777c 100644 --- a/gtsam/basis/basis.i +++ b/gtsam/basis/basis.i @@ -121,6 +121,13 @@ virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor { double x, double a, double b); }; +#include + +typedef 
gtsam::ManifoldEvaluationFactor + ManifoldEvaluationFactorChebyshev2Rot3; +typedef gtsam::ManifoldEvaluationFactor + ManifoldEvaluationFactorChebyshev2Pose3; + // TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and // `ComponentDerivativeFactor` From 87687cee9fad4b5940d19533b5d981e7d55f2cb7 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 5 Jun 2023 15:47:02 -0400 Subject: [PATCH 40/78] wrap another Colamd ordering method --- gtsam/inference/inference.i | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/inference/inference.i b/gtsam/inference/inference.i index d4c90e356..953b1da84 100644 --- a/gtsam/inference/inference.i +++ b/gtsam/inference/inference.i @@ -109,6 +109,7 @@ class Ordering { FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph); + static gtsam::Ordering Colamd(const gtsam::VariableIndex& variableIndex); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, From fcee29e62898d15bf64d93fafbb35f6735d997d1 Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Mon, 5 Jun 2023 19:11:41 -0700 Subject: [PATCH 41/78] Handle q_i = 0 (or NaN) for LOST --- gtsam/geometry/triangulation.cpp | 33 ++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index 2049ce384..d7be29a9f 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -95,16 +95,37 @@ Point3 triangulateLOST(const std::vector& poses, for (size_t i = 0; i < m; i++) { const Pose3& wTi = poses[i]; // TODO(akshay-krishnan): are there better ways to select j? - const int j = (i + 1) % m; + int j = (i + 1) % m; const Pose3& wTj = poses[j]; - const Point3 d_ij = wTj.translation() - wTi.translation(); + Point3 d_ij = wTj.translation() - wTi.translation(); + Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]); + Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]); + double num_i = wZi.cross(wZj).norm(); + double den_i = d_ij.cross(wZj).norm(); - const Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]); - const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]); + // Handle q_i = 0 (or NaN), which arises if the measurement vectors, wZi and wZj, coincide + // (or the baseline vector coincides with the jth measurement vector). + if (num_i == 0 || den_i == 0) { + bool success = false; + for (size_t k = 2; k < m; k++) { + j = (i + k) % m; + const Pose3& wTj = poses[j]; + + d_ij = wTj.translation() - wTi.translation(); + wZj = wTj.rotation().rotate(calibratedMeasurements[j]); + num_i = wZi.cross(wZj).norm(); + den_i = d_ij.cross(wZj).norm(); + if (num_i > 0 && den_i > 0) { + success = true; + break; + } + } + if (!success) throw(TriangulationUnderconstrainedException()); + } // Note: Setting q_i = 1.0 gives same results as DLT. 
- const double q_i = wZi.cross(wZj).norm() / (measurementNoise->sigma() * d_ij.cross(wZj).norm()); + const double q_i = num_i / (measurementNoise->sigma() * den_i); const Matrix23 coefficientMat = q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * @@ -117,7 +138,7 @@ Point3 triangulateLOST(const std::vector& poses, Eigen::ColPivHouseholderQR A_Qr = A.colPivHouseholderQr(); A_Qr.setThreshold(rank_tol); - // if (A_Qr.rank() < 3) throw(TriangulationUnderconstrainedException()); + if (A_Qr.rank() < 3) throw(TriangulationUnderconstrainedException()); return A_Qr.solve(b); } From f6f91ce231eef1855085444031470856bd42e1af Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Tue, 6 Jun 2023 09:38:19 -0700 Subject: [PATCH 42/78] Revert formatting for triangulation.cpp --- gtsam/geometry/triangulation.cpp | 48 ++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index d7be29a9f..e714bb432 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -25,9 +25,9 @@ namespace gtsam { Vector4 triangulateHomogeneousDLT( - const std::vector>& projection_matrices, - const Point2Vector& measurements, - double rank_tol) { + const std::vector>& + projection_matrices, + const Point2Vector& measurements, double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -56,9 +56,9 @@ Vector4 triangulateHomogeneousDLT( } Vector4 triangulateHomogeneousDLT( - const std::vector>& projection_matrices, - const std::vector& measurements, - double rank_tol) { + const std::vector>& + projection_matrices, + const std::vector& measurements, double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -68,7 +68,9 @@ Vector4 triangulateHomogeneousDLT( for (size_t i = 0; i < m; i++) { size_t row = i * 2; const Matrix34& projection = projection_matrices.at(i); - const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector + const Point3& p = + measurements.at(i) + .point3(); // to get access to x,y,z of the bearing vector // build system of equations A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0); @@ -104,8 +106,9 @@ Point3 triangulateLOST(const std::vector& poses, double num_i = wZi.cross(wZj).norm(); double den_i = d_ij.cross(wZj).norm(); - // Handle q_i = 0 (or NaN), which arises if the measurement vectors, wZi and wZj, coincide - // (or the baseline vector coincides with the jth measurement vector). + // Handle q_i = 0 (or NaN), which arises if the measurement vectors, wZi and + // wZj, coincide (or the baseline vector coincides with the jth measurement + // vector). if (num_i == 0 || den_i == 0) { bool success = false; for (size_t k = 2; k < m; k++) { @@ -127,9 +130,9 @@ Point3 triangulateLOST(const std::vector& poses, // Note: Setting q_i = 1.0 gives same results as DLT. 
const double q_i = num_i / (measurementNoise->sigma() * den_i); - const Matrix23 coefficientMat = q_i * - skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * - wTi.rotation().matrix().transpose(); + const Matrix23 coefficientMat = + q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * + wTi.rotation().matrix().transpose(); A.block<2, 3>(2 * i, 0) << coefficientMat; b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation(); @@ -144,20 +147,22 @@ Point3 triangulateLOST(const std::vector& poses, } Point3 triangulateDLT( - const std::vector>& projection_matrices, - const Point2Vector& measurements, - double rank_tol) { - Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); + const std::vector>& + projection_matrices, + const Point2Vector& measurements, double rank_tol) { + Vector4 v = + triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } Point3 triangulateDLT( - const std::vector>& projection_matrices, - const std::vector& measurements, - double rank_tol) { + const std::vector>& + projection_matrices, + const std::vector& measurements, double rank_tol) { // contrary to previous triangulateDLT, this is now taking Unit3 inputs - Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); + Vector4 v = + triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } @@ -169,7 +174,8 @@ Point3 triangulateDLT( * @param landmarkKey to refer to landmark * @return refined Point3 */ -Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, Key landmarkKey) { +Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, + Key landmarkKey) { // Maybe we should consider Gauss-Newton? 
LevenbergMarquardtParams params; params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA; From 8d134fd120525562dead2f9e9cbe3e93bc7e4395 Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Tue, 6 Jun 2023 09:48:05 -0700 Subject: [PATCH 43/78] Revert formatting for triangulation.h --- gtsam/geometry/triangulation.h | 303 ++++++++++++++++++--------------- 1 file changed, 161 insertions(+), 142 deletions(-) diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 61ca5588a..3a8398804 100644 --- a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -20,38 +20,39 @@ #pragma once +#include "gtsam/geometry/Point3.h" #include -#include #include #include #include +#include #include #include -#include #include +#include #include #include #include -#include "gtsam/geometry/Point3.h" #include namespace gtsam { /// Exception thrown by triangulateDLT when SVD returns rank < 3 -class GTSAM_EXPORT TriangulationUnderconstrainedException : public std::runtime_error { - public: - TriangulationUnderconstrainedException() - : std::runtime_error("Triangulation Underconstrained Exception.") {} +class GTSAM_EXPORT TriangulationUnderconstrainedException: public std::runtime_error { +public: + TriangulationUnderconstrainedException() : + std::runtime_error("Triangulation Underconstrained Exception.") { + } }; /// Exception thrown by triangulateDLT when landmark is behind one or more of the cameras -class GTSAM_EXPORT TriangulationCheiralityException : public std::runtime_error { - public: - TriangulationCheiralityException() - : std::runtime_error( - "Triangulation Cheirality Exception: The resulting landmark is behind one or more " - "cameras.") {} +class GTSAM_EXPORT TriangulationCheiralityException: public std::runtime_error { +public: + TriangulationCheiralityException() : + std::runtime_error( + "Triangulation Cheirality Exception: The resulting landmark is behind one or more cameras.") { + } }; /** @@ -63,13 +64,11 @@ class GTSAM_EXPORT TriangulationCheiralityException : public std::runtime_error */ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( const std::vector>& projection_matrices, - const Point2Vector& measurements, - double rank_tol = 1e-9); + const Point2Vector& measurements, double rank_tol = 1e-9); /** * Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors - * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and - * Zisserman) + * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman) * @param projection_matrices Projection matrices (K*P^-1) * @param measurements Unit3 bearing measurements * @param rank_tol SVD rank tolerance @@ -77,8 +76,7 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( */ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( const std::vector>& projection_matrices, - const std::vector& measurements, - double rank_tol = 1e-9); + const std::vector& measurements, double rank_tol = 1e-9); /** * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312 @@ -87,18 +85,18 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( * @param rank_tol SVD rank tolerance * @return Triangulated Point3 */ -GTSAM_EXPORT Point3 -triangulateDLT(const std::vector>& projection_matrices, - const Point2Vector& measurements, - double rank_tol = 1e-9); +GTSAM_EXPORT Point3 triangulateDLT( + const std::vector>& projection_matrices, + const Point2Vector& measurements, + double rank_tol = 1e-9); /** * overload of previous function to work with Unit3 
(projected to canonical camera) */ -GTSAM_EXPORT Point3 -triangulateDLT(const std::vector>& projection_matrices, - const std::vector& measurements, - double rank_tol = 1e-9); +GTSAM_EXPORT Point3 triangulateDLT( + const std::vector>& projection_matrices, + const std::vector& measurements, + double rank_tol = 1e-9); /** * @brief Triangulation using the LOST (Linear Optimal Sine Triangulation) @@ -124,22 +122,20 @@ GTSAM_EXPORT Point3 triangulateLOST(const std::vector& poses, * @param initialEstimate * @return graph and initial values */ -template +template std::pair triangulationGraph( - const std::vector& poses, - std::shared_ptr sharedCal, - const Point2Vector& measurements, - Key landmarkKey, + const std::vector& poses, std::shared_ptr sharedCal, + const Point2Vector& measurements, Key landmarkKey, const Point3& initialEstimate, const SharedNoiseModel& model = noiseModel::Unit::Create(2)) { Values values; - values.insert(landmarkKey, initialEstimate); // Initial landmark value + values.insert(landmarkKey, initialEstimate); // Initial landmark value NonlinearFactorGraph graph; for (size_t i = 0; i < measurements.size(); i++) { const Pose3& pose_i = poses[i]; typedef PinholePose Camera; Camera camera_i(pose_i, sharedCal); - graph.emplace_shared> // + graph.emplace_shared > // (camera_i, measurements[i], model, landmarkKey); } return {graph, values}; @@ -154,22 +150,21 @@ std::pair triangulationGraph( * @param initialEstimate * @return graph and initial values */ -template +template std::pair triangulationGraph( const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements, - Key landmarkKey, + const typename CAMERA::MeasurementVector& measurements, Key landmarkKey, const Point3& initialEstimate, const SharedNoiseModel& model = nullptr) { Values values; - values.insert(landmarkKey, initialEstimate); // Initial landmark value + values.insert(landmarkKey, initialEstimate); // Initial landmark value NonlinearFactorGraph graph; - static SharedNoiseModel unit( - noiseModel::Unit::Create(traits::dimension)); + static SharedNoiseModel unit(noiseModel::Unit::Create( + traits::dimension)); for (size_t i = 0; i < measurements.size(); i++) { const CAMERA& camera_i = cameras[i]; - graph.emplace_shared> // - (camera_i, measurements[i], model ? model : unit, landmarkKey); + graph.emplace_shared > // + (camera_i, measurements[i], model? 
model : unit, landmarkKey); } return {graph, values}; } @@ -182,8 +177,7 @@ std::pair triangulationGraph( * @return refined Point3 */ GTSAM_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, - const Values& values, - Key landmarkKey); + const Values& values, Key landmarkKey); /** * Given an initial estimate , refine a point using measurements in several cameras @@ -193,14 +187,14 @@ GTSAM_EXPORT Point3 optimize(const NonlinearFactorGraph& graph, * @param initialEstimate * @return refined Point3 */ -template +template Point3 triangulateNonlinear(const std::vector& poses, - std::shared_ptr sharedCal, - const Point2Vector& measurements, - const Point3& initialEstimate, - const SharedNoiseModel& model = nullptr) { + std::shared_ptr sharedCal, + const Point2Vector& measurements, const Point3& initialEstimate, + const SharedNoiseModel& model = nullptr) { + // Create a factor graph and initial values - const auto [graph, values] = triangulationGraph // + const auto [graph, values] = triangulationGraph // (poses, sharedCal, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); @@ -213,32 +207,33 @@ Point3 triangulateNonlinear(const std::vector& poses, * @param initialEstimate * @return refined Point3 */ -template -Point3 triangulateNonlinear(const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements, - const Point3& initialEstimate, - const SharedNoiseModel& model = nullptr) { +template +Point3 triangulateNonlinear( + const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements, const Point3& initialEstimate, + const SharedNoiseModel& model = nullptr) { + // Create a factor graph and initial values - const auto [graph, values] = triangulationGraph // + const auto [graph, values] = triangulationGraph // (cameras, measurements, Symbol('p', 0), initialEstimate, model); return optimize(graph, values, Symbol('p', 0)); } -template -std::vector> projectionMatricesFromCameras( - const CameraSet& cameras) { +template +std::vector> +projectionMatricesFromCameras(const CameraSet &cameras) { std::vector> projection_matrices; - for (const CAMERA& camera : cameras) { + for (const CAMERA &camera: cameras) { projection_matrices.push_back(camera.cameraProjectionMatrix()); } return projection_matrices; } // overload, assuming pinholePose -template +template std::vector> projectionMatricesFromPoses( - const std::vector& poses, std::shared_ptr sharedCal) { + const std::vector &poses, std::shared_ptr sharedCal) { std::vector> projection_matrices; for (size_t i = 0; i < poses.size(); i++) { PinholePose camera(poses.at(i), sharedCal); @@ -263,9 +258,9 @@ Cal3_S2 createPinholeCalibration(const CALIBRATION& cal) { /** Internal undistortMeasurement to be used by undistortMeasurement and * undistortMeasurements */ template -MEASUREMENT undistortMeasurementInternal(const CALIBRATION& cal, - const MEASUREMENT& measurement, - std::optional pinholeCal = {}) { +MEASUREMENT undistortMeasurementInternal( + const CALIBRATION& cal, const MEASUREMENT& measurement, + std::optional pinholeCal = {}) { if (!pinholeCal) { pinholeCal = createPinholeCalibration(cal); } @@ -284,13 +279,13 @@ MEASUREMENT undistortMeasurementInternal(const CALIBRATION& cal, * @return measurements with the effect of the distortion of sharedCal removed. 
*/ template -Point2Vector undistortMeasurements(const CALIBRATION& cal, const Point2Vector& measurements) { +Point2Vector undistortMeasurements(const CALIBRATION& cal, + const Point2Vector& measurements) { Cal3_S2 pinholeCalibration = createPinholeCalibration(cal); Point2Vector undistortedMeasurements; // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. - std::transform(measurements.begin(), - measurements.end(), + std::transform(measurements.begin(), measurements.end(), std::back_inserter(undistortedMeasurements), [&cal, &pinholeCalibration](const Point2& measurement) { return undistortMeasurementInternal( @@ -301,7 +296,8 @@ Point2Vector undistortMeasurements(const CALIBRATION& cal, const Point2Vector& m /** Specialization for Cal3_S2 as it doesn't need to be undistorted. */ template <> -inline Point2Vector undistortMeasurements(const Cal3_S2& cal, const Point2Vector& measurements) { +inline Point2Vector undistortMeasurements(const Cal3_S2& cal, + const Point2Vector& measurements) { return measurements; } @@ -318,15 +314,17 @@ inline Point2Vector undistortMeasurements(const Cal3_S2& cal, const Point2Vector */ template typename CAMERA::MeasurementVector undistortMeasurements( - const CameraSet& cameras, const typename CAMERA::MeasurementVector& measurements) { + const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements) { const size_t nrMeasurements = measurements.size(); assert(nrMeasurements == cameras.size()); typename CAMERA::MeasurementVector undistortedMeasurements(nrMeasurements); for (size_t ii = 0; ii < nrMeasurements; ++ii) { // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. - undistortedMeasurements[ii] = undistortMeasurementInternal( - cameras[ii].calibration(), measurements[ii]); + undistortedMeasurements[ii] = + undistortMeasurementInternal( + cameras[ii].calibration(), measurements[ii]); } return undistortedMeasurements; } @@ -356,13 +354,12 @@ inline SphericalCamera::MeasurementVector undistortMeasurements( * @return homogeneous measurements in image plane */ template -inline Point3Vector calibrateMeasurementsShared(const CALIBRATION& cal, - const Point2Vector& measurements) { +inline Point3Vector calibrateMeasurementsShared( + const CALIBRATION& cal, const Point2Vector& measurements) { Point3Vector calibratedMeasurements; // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. 
- std::transform(measurements.begin(), - measurements.end(), + std::transform(measurements.begin(), measurements.end(), std::back_inserter(calibratedMeasurements), [&cal](const Point2& measurement) { Point3 p; @@ -381,21 +378,25 @@ inline Point3Vector calibrateMeasurementsShared(const CALIBRATION& cal, * @return homogeneous measurements in image plane */ template -inline Point3Vector calibrateMeasurements(const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements) { +inline Point3Vector calibrateMeasurements( + const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements) { const size_t nrMeasurements = measurements.size(); assert(nrMeasurements == cameras.size()); Point3Vector calibratedMeasurements(nrMeasurements); for (size_t ii = 0; ii < nrMeasurements; ++ii) { - calibratedMeasurements[ii] << cameras[ii].calibration().calibrate(measurements[ii]), 1.0; + calibratedMeasurements[ii] + << cameras[ii].calibration().calibrate(measurements[ii]), + 1.0; } return calibratedMeasurements; } /** Specialize for SphericalCamera to do nothing. */ template -inline Point3Vector calibrateMeasurements(const CameraSet& cameras, - const SphericalCamera::MeasurementVector& measurements) { +inline Point3Vector calibrateMeasurements( + const CameraSet& cameras, + const SphericalCamera::MeasurementVector& measurements) { Point3Vector calibratedMeasurements(measurements.size()); for (size_t ii = 0; ii < measurements.size(); ++ii) { calibratedMeasurements[ii] << measurements[ii].point3(); @@ -420,8 +421,7 @@ template Point3 triangulatePoint3(const std::vector& poses, std::shared_ptr sharedCal, const Point2Vector& measurements, - double rank_tol = 1e-9, - bool optimize = false, + double rank_tol = 1e-9, bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { assert(poses.size() == measurements.size()); @@ -433,21 +433,25 @@ Point3 triangulatePoint3(const std::vector& poses, // Reduce input noise model to an isotropic noise model using the mean of // the diagonal. const double measurementSigma = model ? model->sigmas().mean() : 1e-4; - SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, measurementSigma); + SharedIsotropic measurementNoise = + noiseModel::Isotropic::Sigma(2, measurementSigma); // calibrate the measurements to obtain homogenous coordinates in image // plane. auto calibratedMeasurements = calibrateMeasurementsShared(*sharedCal, measurements); - point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, rank_tol); + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, + rank_tol); } else { // construct projection matrices from poses & calibration auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal); // Undistort the measurements, leaving only the pinhole elements in effect. 
- auto undistortedMeasurements = undistortMeasurements(*sharedCal, measurements); + auto undistortedMeasurements = + undistortMeasurements(*sharedCal, measurements); - point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + point = + triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); } // Then refine using non-linear optimization @@ -483,8 +487,7 @@ Point3 triangulatePoint3(const std::vector& poses, template Point3 triangulatePoint3(const CameraSet& cameras, const typename CAMERA::MeasurementVector& measurements, - double rank_tol = 1e-9, - bool optimize = false, + double rank_tol = 1e-9, bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { size_t m = cameras.size(); @@ -498,7 +501,8 @@ Point3 triangulatePoint3(const CameraSet& cameras, // Reduce input noise model to an isotropic noise model using the mean of // the diagonal. const double measurementSigma = model ? model->sigmas().mean() : 1e-4; - SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, measurementSigma); + SharedIsotropic measurementNoise = + noiseModel::Isotropic::Sigma(2, measurementSigma); // construct poses from cameras. std::vector poses; @@ -507,17 +511,21 @@ Point3 triangulatePoint3(const CameraSet& cameras, // calibrate the measurements to obtain homogenous coordinates in image // plane. - auto calibratedMeasurements = calibrateMeasurements(cameras, measurements); + auto calibratedMeasurements = + calibrateMeasurements(cameras, measurements); - point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, rank_tol); + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise, + rank_tol); } else { // construct projection matrices from poses & calibration auto projection_matrices = projectionMatricesFromCameras(cameras); // Undistort the measurements, leaving only the pinhole elements in effect. - auto undistortedMeasurements = undistortMeasurements(cameras, measurements); + auto undistortedMeasurements = + undistortMeasurements(cameras, measurements); - point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + point = + triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); } // Then refine using non-linear optimization @@ -540,8 +548,7 @@ Point3 triangulatePoint3(const CameraSet& cameras, template Point3 triangulatePoint3(const CameraSet>& cameras, const Point2Vector& measurements, - double rank_tol = 1e-9, - bool optimize = false, + double rank_tol = 1e-9, bool optimize = false, const SharedNoiseModel& model = nullptr, const bool useLOST = false) { return triangulatePoint3> // @@ -549,16 +556,16 @@ Point3 triangulatePoint3(const CameraSet>& cameras, } struct GTSAM_EXPORT TriangulationParameters { - double rankTolerance; ///< threshold to decide whether triangulation is result.degenerate - ///< (the rank is the number of singular values of the triangulation matrix which are larger than - ///< rankTolerance) - bool enableEPI; ///< if set to true, will refine triangulation using LM + + double rankTolerance; ///< threshold to decide whether triangulation is result.degenerate + ///< (the rank is the number of singular values of the triangulation matrix which are larger than rankTolerance) + bool enableEPI; ///< if set to true, will refine triangulation using LM /** * if the landmark is triangulated at distance larger than this, * result is flagged as degenerate. 
*/ - double landmarkDistanceThreshold; // + double landmarkDistanceThreshold; // /** * If this is nonnegative the we will check if the average reprojection error @@ -567,7 +574,7 @@ struct GTSAM_EXPORT TriangulationParameters { */ double dynamicOutlierRejectionThreshold; - SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation + SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation /** * Constructor @@ -579,36 +586,39 @@ struct GTSAM_EXPORT TriangulationParameters { * */ TriangulationParameters(const double _rankTolerance = 1.0, - const bool _enableEPI = false, - double _landmarkDistanceThreshold = -1, - double _dynamicOutlierRejectionThreshold = -1, - const SharedNoiseModel& _noiseModel = nullptr) - : rankTolerance(_rankTolerance), - enableEPI(_enableEPI), // - landmarkDistanceThreshold(_landmarkDistanceThreshold), // - dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold), - noiseModel(_noiseModel) {} + const bool _enableEPI = false, double _landmarkDistanceThreshold = -1, + double _dynamicOutlierRejectionThreshold = -1, + const SharedNoiseModel& _noiseModel = nullptr) : + rankTolerance(_rankTolerance), enableEPI(_enableEPI), // + landmarkDistanceThreshold(_landmarkDistanceThreshold), // + dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold), + noiseModel(_noiseModel){ + } // stream to output - friend std::ostream& operator<<(std::ostream& os, const TriangulationParameters& p) { + friend std::ostream &operator<<(std::ostream &os, + const TriangulationParameters& p) { os << "rankTolerance = " << p.rankTolerance << std::endl; os << "enableEPI = " << p.enableEPI << std::endl; - os << "landmarkDistanceThreshold = " << p.landmarkDistanceThreshold << std::endl; - os << "dynamicOutlierRejectionThreshold = " << p.dynamicOutlierRejectionThreshold << std::endl; + os << "landmarkDistanceThreshold = " << p.landmarkDistanceThreshold + << std::endl; + os << "dynamicOutlierRejectionThreshold = " + << p.dynamicOutlierRejectionThreshold << std::endl; os << "noise model" << std::endl; return os; } - private: +private: + #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /// /// Serialization function friend class boost::serialization::access; - template - void serialize(ARCHIVE& ar, const unsigned int version) { - ar& BOOST_SERIALIZATION_NVP(rankTolerance); - ar& BOOST_SERIALIZATION_NVP(enableEPI); - ar& BOOST_SERIALIZATION_NVP(landmarkDistanceThreshold); - ar& BOOST_SERIALIZATION_NVP(dynamicOutlierRejectionThreshold); + template + void serialize(ARCHIVE & ar, const unsigned int version) { + ar & BOOST_SERIALIZATION_NVP(rankTolerance); + ar & BOOST_SERIALIZATION_NVP(enableEPI); + ar & BOOST_SERIALIZATION_NVP(landmarkDistanceThreshold); + ar & BOOST_SERIALIZATION_NVP(dynamicOutlierRejectionThreshold); } #endif }; @@ -635,10 +645,16 @@ class TriangulationResult : public std::optional { * Constructor */ TriangulationResult(const Point3& p) : status(VALID) { emplace(p); } - static TriangulationResult Degenerate() { return TriangulationResult(DEGENERATE); } + static TriangulationResult Degenerate() { + return TriangulationResult(DEGENERATE); + } static TriangulationResult Outlier() { return TriangulationResult(OUTLIER); } - static TriangulationResult FarPoint() { return TriangulationResult(FAR_POINT); } - static TriangulationResult BehindCamera() { return TriangulationResult(BEHIND_CAMERA); } + static TriangulationResult FarPoint() { + return TriangulationResult(FAR_POINT); + } + static TriangulationResult BehindCamera() { + return 
TriangulationResult(BEHIND_CAMERA); + } bool valid() const { return status == VALID; } bool degenerate() const { return status == DEGENERATE; } bool outlier() const { return status == OUTLIER; } @@ -649,7 +665,8 @@ class TriangulationResult : public std::optional { return value(); } // stream to output - friend std::ostream& operator<<(std::ostream& os, const TriangulationResult& result) { + friend std::ostream& operator<<(std::ostream& os, + const TriangulationResult& result) { if (result) os << "point = " << *result << std::endl; else @@ -669,10 +686,11 @@ class TriangulationResult : public std::optional { }; /// triangulateSafe: extensive checking of the outcome -template +template TriangulationResult triangulateSafe(const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measured, - const TriangulationParameters& params) { + const typename CAMERA::MeasurementVector& measured, + const TriangulationParameters& params) { + size_t m = cameras.size(); // if we have a single pose the corresponding factor is uninformative @@ -681,22 +699,25 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, else // We triangulate the 3D position of the landmark try { - Point3 point = triangulatePoint3( - cameras, measured, params.rankTolerance, params.enableEPI, params.noiseModel); + Point3 point = + triangulatePoint3(cameras, measured, params.rankTolerance, + params.enableEPI, params.noiseModel); // Check landmark distance and re-projection errors to avoid outliers size_t i = 0; double maxReprojError = 0.0; - for (const CAMERA& camera : cameras) { + for(const CAMERA& camera: cameras) { const Pose3& pose = camera.pose(); - if (params.landmarkDistanceThreshold > 0 && - distance3(pose.translation(), point) > params.landmarkDistanceThreshold) + if (params.landmarkDistanceThreshold > 0 + && distance3(pose.translation(), point) + > params.landmarkDistanceThreshold) return TriangulationResult::FarPoint(); #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION // verify that the triangulated point lies in front of all cameras // Only needed if this was not yet handled by exception const Point3& p_local = pose.transformTo(point); - if (p_local.z() <= 0) return TriangulationResult::BehindCamera(); + if (p_local.z() <= 0) + return TriangulationResult::BehindCamera(); #endif // Check reprojection error if (params.dynamicOutlierRejectionThreshold > 0) { @@ -707,21 +728,19 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, i += 1; } // Flag as degenerate if average reprojection error is too large - if (params.dynamicOutlierRejectionThreshold > 0 && - maxReprojError > params.dynamicOutlierRejectionThreshold) + if (params.dynamicOutlierRejectionThreshold > 0 + && maxReprojError > params.dynamicOutlierRejectionThreshold) return TriangulationResult::Outlier(); // all good! 
return TriangulationResult(point); } catch (TriangulationUnderconstrainedException&) { // This exception is thrown if - // 1) There is a single pose for triangulation - this should not happen because we checked the - // number of poses before 2) The rank of the matrix used for triangulation is < 3: - // rotation-only, parallel cameras (or motion towards the landmark) + // 1) There is a single pose for triangulation - this should not happen because we checked the number of poses before + // 2) The rank of the matrix used for triangulation is < 3: rotation-only, parallel cameras (or motion towards the landmark) return TriangulationResult::Degenerate(); } catch (TriangulationCheiralityException&) { - // point is behind one of the cameras: can be the case of close-to-parallel cameras or may - // depend on outliers + // point is behind one of the cameras: can be the case of close-to-parallel cameras or may depend on outliers return TriangulationResult::BehindCamera(); } } @@ -733,4 +752,4 @@ using CameraSetCal3DS2 = CameraSet>; using CameraSetCal3Fisheye = CameraSet>; using CameraSetCal3Unified = CameraSet>; using CameraSetSpherical = CameraSet; -} // namespace gtsam +} // \namespace gtsam From bba4b8731f68652398e4d1b85224e13e89517917 Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Tue, 6 Jun 2023 10:01:46 -0700 Subject: [PATCH 44/78] Final cleanup --- gtsam/geometry/triangulation.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index e714bb432..c8af2ea72 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -25,8 +25,8 @@ namespace gtsam { Vector4 triangulateHomogeneousDLT( - const std::vector>& - projection_matrices, + const std::vector>& + projection_matrices, const Point2Vector& measurements, double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -56,8 +56,8 @@ Vector4 triangulateHomogeneousDLT( } Vector4 triangulateHomogeneousDLT( - const std::vector>& - projection_matrices, + const std::vector>& + projection_matrices, const std::vector& measurements, double rank_tol) { // number of cameras size_t m = projection_matrices.size(); @@ -68,7 +68,7 @@ Vector4 triangulateHomogeneousDLT( for (size_t i = 0; i < m; i++) { size_t row = i * 2; const Matrix34& projection = projection_matrices.at(i); - const Point3& p = + const Point3& p = measurements.at(i) .point3(); // to get access to x,y,z of the bearing vector @@ -130,7 +130,7 @@ Point3 triangulateLOST(const std::vector& poses, // Note: Setting q_i = 1.0 gives same results as DLT. 
const double q_i = num_i / (measurementNoise->sigma() * den_i); - const Matrix23 coefficientMat = + const Matrix23 coefficientMat = q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * wTi.rotation().matrix().transpose(); @@ -147,8 +147,8 @@ Point3 triangulateLOST(const std::vector& poses, } Point3 triangulateDLT( - const std::vector>& - projection_matrices, + const std::vector>& + projection_matrices, const Point2Vector& measurements, double rank_tol) { Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); @@ -161,7 +161,7 @@ Point3 triangulateDLT( projection_matrices, const std::vector& measurements, double rank_tol) { // contrary to previous triangulateDLT, this is now taking Unit3 inputs - Vector4 v = + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); @@ -174,7 +174,7 @@ Point3 triangulateDLT( * @param landmarkKey to refer to landmark * @return refined Point3 */ -Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, +Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, Key landmarkKey) { // Maybe we should consider Gauss-Newton? LevenbergMarquardtParams params; From 1e6f1b757cdcc02a810d3e425009017fcf62bbcc Mon Sep 17 00:00:00 2001 From: Travis Driver Date: Tue, 6 Jun 2023 10:11:49 -0700 Subject: [PATCH 45/78] Final final cleanup --- gtsam/geometry/triangulation.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index c8af2ea72..039dfd74d 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -150,15 +150,15 @@ Point3 triangulateDLT( const std::vector>& projection_matrices, const Point2Vector& measurements, double rank_tol) { - Vector4 v = + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } Point3 triangulateDLT( - const std::vector>& - projection_matrices, + const std::vector>& + projection_matrices, const std::vector& measurements, double rank_tol) { // contrary to previous triangulateDLT, this is now taking Unit3 inputs Vector4 v = From 4203c4d355f82829a881a43dae721b89ad2dcd7f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 6 Jun 2023 00:51:55 -0400 Subject: [PATCH 46/78] enable windows release build in the CI --- .github/workflows/build-windows.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 3d4bf3faf..17e1e0376 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -28,8 +28,7 @@ jobs: build_type: [ Debug, - #TODO(Varun) The release build takes over 2.5 hours, need to figure out why. - # Release + Release ] build_unstable: [ON] include: @@ -93,13 +92,19 @@ jobs: # Set the BOOST_ROOT variable echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV + - name: Install Eigen + shell: powershell + run: | + choco install eigen + Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\include" + - name: Checkout uses: actions/checkout@v3 - name: Configuration run: | cmake -E remove_directory build - cmake -B build -S . 
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" + cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" -DGTSAM_USE_SYSTEM_EIGEN=ON -DEIGEN_INCLUDE_DIR="$env:ChocolateyInstall\lib\eigen\include" - name: Build run: | @@ -125,4 +130,3 @@ jobs: # Run GTSAM_UNSTABLE tests #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable - From a5b90df471bee77e2ed73fe6f8a169d45a80aea4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 6 Jun 2023 17:48:58 -0400 Subject: [PATCH 47/78] configure system Eigen --- .github/workflows/build-windows.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 17e1e0376..a24e14c74 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -96,7 +96,9 @@ jobs: shell: powershell run: | choco install eigen - Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\include" + echo "EIGEN3_INCLUDE_DIR=$env:ChocolateyInstall\lib\eigen\include" >> $env:GITHUB_ENV + echo "CMAKE_PREFIX_PATH=$env:CMAKE_PREFIX_PATH;$env:ChocolateyInstall\lib\eigen\cmake\share" >> $env:GITHUB_ENV + Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\cmake\share" - name: Checkout uses: actions/checkout@v3 @@ -104,7 +106,7 @@ jobs: - name: Configuration run: | cmake -E remove_directory build - cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" -DGTSAM_USE_SYSTEM_EIGEN=ON -DEIGEN_INCLUDE_DIR="$env:ChocolateyInstall\lib\eigen\include" + cmake -B build -S . 
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" -DGTSAM_USE_SYSTEM_EIGEN=ON -DEIGEN3_INCLUDE_DIR="$env:ChocolateyInstall\lib\eigen\include" - name: Build run: | From 9ae4146ef803e2ec01c41abc6681b1b02777cdfb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 6 Jun 2023 18:20:11 -0400 Subject: [PATCH 48/78] conditionally compile Chebyshev2 Pose3 test --- gtsam/basis/tests/testChebyshev2.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index e2f01e8af..e51436fcf 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -156,6 +156,7 @@ TEST(Chebyshev2, InterpolatePose2) { EXPECT(assert_equal(numericalH, actualH, 1e-9)); } +#ifdef GTSAM_POSE3_EXPMAP //****************************************************************************** // Interpolating poses using the exponential map TEST(Chebyshev2, InterpolatePose3) { @@ -184,6 +185,7 @@ TEST(Chebyshev2, InterpolatePose3) { numericalDerivative11, 6 * N>(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-8)); } +#endif //****************************************************************************** TEST(Chebyshev2, Decomposition) { From cdbdc67ee49d54307e22e66f03db9a3d11b66d9f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 6 Jun 2023 18:32:16 -0400 Subject: [PATCH 49/78] fix eigen cmake search path --- .github/workflows/build-windows.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index a24e14c74..d4f1d3ba0 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -97,8 +97,8 @@ jobs: run: | choco install eigen echo "EIGEN3_INCLUDE_DIR=$env:ChocolateyInstall\lib\eigen\include" >> $env:GITHUB_ENV - echo "CMAKE_PREFIX_PATH=$env:CMAKE_PREFIX_PATH;$env:ChocolateyInstall\lib\eigen\cmake\share" >> $env:GITHUB_ENV - Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\cmake\share" + echo "CMAKE_PREFIX_PATH=$env:CMAKE_PREFIX_PATH;$env:ChocolateyInstall\lib\eigen\share\cmake" >> $env:GITHUB_ENV + Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\share\cmake" - name: Checkout uses: actions/checkout@v3 From b947a7201c5f9be578374d47f833bd5e023e6028 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 7 Jun 2023 12:44:10 -0400 Subject: [PATCH 50/78] remove need for Eigen install since latest packaged version should work --- .github/workflows/build-windows.yml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index d4f1d3ba0..849fe8f7b 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -92,21 +92,13 @@ jobs: # Set the BOOST_ROOT variable echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV - - name: Install Eigen - shell: powershell - run: | - choco install eigen - echo "EIGEN3_INCLUDE_DIR=$env:ChocolateyInstall\lib\eigen\include" >> $env:GITHUB_ENV - echo "CMAKE_PREFIX_PATH=$env:CMAKE_PREFIX_PATH;$env:ChocolateyInstall\lib\eigen\share\cmake" >> $env:GITHUB_ENV - Get-ChildItem -Force "$env:ChocolateyInstall\lib\eigen\share\cmake" - - name: Checkout uses: actions/checkout@v3 - name: Configuration run: | cmake -E remove_directory build - cmake -B build -S . 
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" -DGTSAM_USE_SYSTEM_EIGEN=ON -DEIGEN3_INCLUDE_DIR="$env:ChocolateyInstall\lib\eigen\include" + cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib" - name: Build run: | From e56186c45ab2b28cab85fb671f7e22535482bc08 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 7 Jun 2023 12:52:06 -0400 Subject: [PATCH 51/78] remove deprecated Windows image --- .github/workflows/build-windows.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 849fe8f7b..0434577c1 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -21,8 +21,6 @@ jobs: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. name: [ - #TODO This build fails, need to understand why. - # windows-2016-cl, windows-2019-cl, ] @@ -32,12 +30,6 @@ jobs: ] build_unstable: [ON] include: - #TODO This build fails, need to understand why. - # - name: windows-2016-cl - # os: windows-2016 - # compiler: cl - # platform: 32 - - name: windows-2019-cl os: windows-2019 compiler: cl From 6584b78cb4e55ab28ed464b1165e4c4ad2e0acd5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 8 Jun 2023 18:32:02 -0400 Subject: [PATCH 52/78] fix memory leak --- gtsam/base/tests/testStdOptionalSerialization.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gtsam/base/tests/testStdOptionalSerialization.cpp b/gtsam/base/tests/testStdOptionalSerialization.cpp index dd99b0f12..d9bd1da4a 100644 --- a/gtsam/base/tests/testStdOptionalSerialization.cpp +++ b/gtsam/base/tests/testStdOptionalSerialization.cpp @@ -149,6 +149,9 @@ TEST(StdOptionalSerialization, SerializTestOptionalStructPointerPointer) { // Check that it worked EXPECT(opt2.has_value()); EXPECT(**opt2 == TestOptionalStruct(42)); + + delete (*opt); + delete (*opt2); } int main() { From 5b588f2ea70c159f1891d28df3d2eb3c1c219200 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 15 Jun 2023 16:30:10 -0400 Subject: [PATCH 53/78] update sfm module --- gtsam/sfm/sfm.i | 5 ++-- python/gtsam/preamble/sfm.h | 6 ----- python/gtsam/tests/test_DsfTrackGenerator.py | 28 ++++++++++---------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index 402d29039..059aa21ed 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -6,15 +6,14 @@ namespace gtsam { #include class SfmTrack2d { - std::vector> measurements; + std::vector measurements; SfmTrack2d(); SfmTrack2d(const std::vector& measurements); size_t numberMeasurements() const; - pair measurement(size_t idx) const; + gtsam::SfmMeasurement measurement(size_t idx) const; pair siftIndex(size_t idx) const; void addMeasurement(size_t idx, const gtsam::Point2& m); - gtsam::SfmMeasurement measurement(size_t idx) const; bool hasUniqueCameras() const; Eigen::MatrixX2d measurementMatrix() const; Eigen::VectorXi indexVector() const; diff --git a/python/gtsam/preamble/sfm.h b/python/gtsam/preamble/sfm.h index 814c1df31..bd71c0817 100644 --- a/python/gtsam/preamble/sfm.h +++ b/python/gtsam/preamble/sfm.h @@ -11,9 +11,3 @@ * mutations on Python side will not be reflected on C++. 
*/ -PYBIND11_MAKE_OPAQUE(std::vector); -PYBIND11_MAKE_OPAQUE(std::vector); -PYBIND11_MAKE_OPAQUE(std::vector); -PYBIND11_MAKE_OPAQUE( - std::vector); -PYBIND11_MAKE_OPAQUE(gtsam::gtsfm::MatchIndicesMap); diff --git a/python/gtsam/tests/test_DsfTrackGenerator.py b/python/gtsam/tests/test_DsfTrackGenerator.py index 284c3f5cc..282b384ec 100644 --- a/python/gtsam/tests/test_DsfTrackGenerator.py +++ b/python/gtsam/tests/test_DsfTrackGenerator.py @@ -5,13 +5,13 @@ Authors: John Lambert import unittest -import gtsam import numpy as np -from gtsam import (IndexPair, KeypointsVector, MatchIndicesMap, Point2, - SfmMeasurementVector, SfmTrack2d) from gtsam.gtsfm import Keypoints from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import IndexPair, Point2, SfmTrack2d + class TestDsfTrackGenerator(GtsamTestCase): """Tests for DsfTrackGenerator.""" @@ -23,14 +23,14 @@ class TestDsfTrackGenerator(GtsamTestCase): kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]])) kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]])) - keypoints_list = KeypointsVector() + keypoints_list = [] keypoints_list.append(kps_i0) keypoints_list.append(kps_i1) keypoints_list.append(kps_i2) # For each image pair (i1,i2), we provide a (K,2) matrix # of corresponding image indices (k1,k2). - matches_dict = MatchIndicesMap() + matches_dict = {} matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]]) matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]]) @@ -81,16 +81,16 @@ class TestDsfTrackGenerator(GtsamTestCase): np.testing.assert_allclose(track2.indexVector(), [1, 2]) -class TestSfmTrack2d(GtsamTestCase): - """Tests for SfmTrack2d.""" +# class TestSfmTrack2d(GtsamTestCase): +# """Tests for SfmTrack2d.""" - def test_sfm_track_2d_constructor(self) -> None: - """Test construction of 2D SfM track.""" - measurements = SfmMeasurementVector() - measurements.append((0, Point2(10, 20))) - track = SfmTrack2d(measurements=measurements) - track.measurement(0) - assert track.numberMeasurements() == 1 +# def test_sfm_track_2d_constructor(self) -> None: +# """Test construction of 2D SfM track.""" +# measurements = [] +# measurements.append((0, Point2(10, 20))) +# track = SfmTrack2d(measurements=measurements) +# track.measurement(0) +# assert track.numberMeasurements() == 1 if __name__ == "__main__": From 31adb3ed453b4fb107693b0f63166d5de4a9474e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 15 Jun 2023 16:34:37 -0400 Subject: [PATCH 54/78] fix remaining python tests --- python/CMakeLists.txt | 1 + .../gtsam/examples/FixedLagSmootherExample.py | 7 +++-- python/gtsam/preamble/base.h | 2 -- python/gtsam/preamble/custom.h | 3 ++ python/gtsam/preamble/geometry.h | 2 -- python/gtsam/preamble/hybrid.h | 8 ----- python/gtsam/specializations/base.h | 1 - python/gtsam/specializations/custom.h | 5 +++- python/gtsam/specializations/inference.h | 2 -- .../tests/test_FixedLagSmootherExample.py | 9 +++--- python/gtsam/tests/test_HybridBayesNet.py | 19 +++++++----- python/gtsam/tests/test_custom_factor.py | 30 ++++++++++++------- python/gtsam/utils/plot.py | 3 +- 13 files changed, 49 insertions(+), 43 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 2557da237..684cf31a1 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -52,6 +52,7 @@ set(ignore gtsam::IndexPairVector gtsam::BetweenFactorPose2s gtsam::BetweenFactorPose3s + gtsam::FixedLagSmootherKeyTimestampMap gtsam::FixedLagSmootherKeyTimestampMapValue gtsam::Point2Vector gtsam::Point2Pairs diff --git 
a/python/gtsam/examples/FixedLagSmootherExample.py b/python/gtsam/examples/FixedLagSmootherExample.py index 99af0edcf..c56ebbe07 100644 --- a/python/gtsam/examples/FixedLagSmootherExample.py +++ b/python/gtsam/examples/FixedLagSmootherExample.py @@ -16,6 +16,7 @@ import numpy as np import gtsam import gtsam_unstable + def BatchFixedLagSmootherExample(): """ Runs a batch fixed smoother on an agent with two odometry @@ -31,7 +32,7 @@ def BatchFixedLagSmootherExample(): # that will be sent to the smoothers new_factors = gtsam.NonlinearFactorGraph() new_values = gtsam.Values() - new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap() + new_timestamps = {} # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) @@ -39,7 +40,7 @@ def BatchFixedLagSmootherExample(): X1 = 0 new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert((X1, 0.0)) + new_timestamps[X1] = 0.0 delta_time = 0.25 time = 0.25 @@ -49,7 +50,7 @@ def BatchFixedLagSmootherExample(): current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert((current_key, time)) + new_timestamps[current_key] = time # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s] diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index b1ed2ba14..d07a75f6f 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -10,5 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ - -PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector diff --git a/python/gtsam/preamble/custom.h b/python/gtsam/preamble/custom.h index d07a75f6f..d7bc6360f 100644 --- a/python/gtsam/preamble/custom.h +++ b/python/gtsam/preamble/custom.h @@ -10,3 +10,6 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ + +// Added so that CustomFactor can pass the JacobianVector to the C++ side +PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector diff --git a/python/gtsam/preamble/geometry.h b/python/gtsam/preamble/geometry.h index 5c9df8ef8..40b841abc 100644 --- a/python/gtsam/preamble/geometry.h +++ b/python/gtsam/preamble/geometry.h @@ -11,8 +11,6 @@ * mutations on Python side will not be reflected on C++. */ -#include - PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); PYBIND11_MAKE_OPAQUE(gtsam::CameraSet>); diff --git a/python/gtsam/preamble/hybrid.h b/python/gtsam/preamble/hybrid.h index 90a062d66..d07a75f6f 100644 --- a/python/gtsam/preamble/hybrid.h +++ b/python/gtsam/preamble/hybrid.h @@ -10,11 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ -#include - -// NOTE: Needed since we are including pybind11/stl.h. -#ifdef GTSAM_ALLOCATOR_TBB -PYBIND11_MAKE_OPAQUE(std::vector>); -#else -PYBIND11_MAKE_OPAQUE(std::vector); -#endif diff --git a/python/gtsam/specializations/base.h b/python/gtsam/specializations/base.h index 995753da1..22fe3beff 100644 --- a/python/gtsam/specializations/base.h +++ b/python/gtsam/specializations/base.h @@ -11,4 +11,3 @@ * and saves one copy operation. 
*/ -py::bind_vector >(m_, "JacobianVector"); diff --git a/python/gtsam/specializations/custom.h b/python/gtsam/specializations/custom.h index d46ccdc66..cffb5115f 100644 --- a/python/gtsam/specializations/custom.h +++ b/python/gtsam/specializations/custom.h @@ -9,4 +9,7 @@ * interface, but without the `` copying mechanism. Combined * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, * and saves one copy operation. - */ \ No newline at end of file + */ + +// Added so that CustomFactor can pass the JacobianVector to the C++ side +py::bind_vector >(m_, "JacobianVector"); diff --git a/python/gtsam/specializations/inference.h b/python/gtsam/specializations/inference.h index 9e23444ea..da8842eaf 100644 --- a/python/gtsam/specializations/inference.h +++ b/python/gtsam/specializations/inference.h @@ -10,5 +10,3 @@ * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python, * and saves one copy operation. */ - -py::bind_map>(m_, "__MapCharDouble"); diff --git a/python/gtsam/tests/test_FixedLagSmootherExample.py b/python/gtsam/tests/test_FixedLagSmootherExample.py index ddd2d49e4..412abe987 100644 --- a/python/gtsam/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam/tests/test_FixedLagSmootherExample.py @@ -11,10 +11,11 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python) import unittest import numpy as np +from gtsam.utils.test_case import GtsamTestCase import gtsam import gtsam_unstable -from gtsam.utils.test_case import GtsamTestCase + class TestFixedLagSmootherExample(GtsamTestCase): ''' @@ -36,7 +37,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # that will be sent to the smoothers new_factors = gtsam.NonlinearFactorGraph() new_values = gtsam.Values() - new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap() + new_timestamps = {} # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) @@ -47,7 +48,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert((X1, 0.0)) + new_timestamps[X1] = 0.0 delta_time = 0.25 time = 0.25 @@ -77,7 +78,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert((current_key, time)) + new_timestamps[current_key] = time # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py index 01e1c5a5d..bc685dec6 100644 --- a/python/gtsam/tests/test_HybridBayesNet.py +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -17,8 +17,9 @@ import numpy as np from gtsam.symbol_shorthand import A, X from gtsam.utils.test_case import GtsamTestCase -from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues, GaussianConditional, - GaussianMixture, HybridBayesNet, HybridValues, noiseModel, VectorValues) +from gtsam import (DiscreteConditional, DiscreteKeys, DiscreteValues, + GaussianConditional, GaussianMixture, HybridBayesNet, + HybridValues, VectorValues, noiseModel) class TestHybridBayesNet(GtsamTestCase): @@ -31,8 +32,8 @@ class TestHybridBayesNet(GtsamTestCase): # Create the continuous conditional I_1x1 = np.eye(1) - conditional = GaussianConditional.FromMeanAndStddev(X(0), 2 * I_1x1, X(1), [-4], - 5.0) + conditional = GaussianConditional.FromMeanAndStddev( + X(0), 2 * I_1x1, X(1), [-4], 5.0) # Create the noise models model0 = noiseModel.Diagonal.Sigmas([2.0]) @@ -47,8 +48,9 @@ class TestHybridBayesNet(GtsamTestCase): # Create hybrid Bayes net. bayesNet = HybridBayesNet() bayesNet.push_back(conditional) - bayesNet.push_back(GaussianMixture( - [X(1)], [], discrete_keys, [conditional0, conditional1])) + bayesNet.push_back( + GaussianMixture([X(1)], [], discrete_keys, + [conditional0, conditional1])) bayesNet.push_back(DiscreteConditional(Asia, "99/1")) # Create values at which to evaluate. @@ -76,7 +78,7 @@ class TestHybridBayesNet(GtsamTestCase): self.check_invariance(bayesNet.at(0).asGaussian(), continuous) self.check_invariance(bayesNet.at(0).asGaussian(), values) self.check_invariance(bayesNet.at(0), values) - + self.check_invariance(bayesNet.at(1), values) self.check_invariance(bayesNet.at(2).asDiscrete(), discrete) @@ -89,7 +91,8 @@ class TestHybridBayesNet(GtsamTestCase): self.assertTrue(probability >= 0.0) logProb = conditional.logProbability(values) self.assertAlmostEqual(probability, np.exp(logProb)) - expected = conditional.logNormalizationConstant() - conditional.error(values) + expected = conditional.logNormalizationConstant() - \ + conditional.error(values) self.assertAlmostEqual(logProb, expected) diff --git a/python/gtsam/tests/test_custom_factor.py b/python/gtsam/tests/test_custom_factor.py index a3bb00384..d19706c49 100644 --- a/python/gtsam/tests/test_custom_factor.py +++ b/python/gtsam/tests/test_custom_factor.py @@ -11,17 +11,20 @@ Author: Fan Jiang import unittest from typing import List -import gtsam import numpy as np -from gtsam import CustomFactor, JacobianFactor, Pose2, Values from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import CustomFactor, Pose2, Values + class TestCustomFactor(GtsamTestCase): + def test_new(self): """Test the creation of a new CustomFactor""" - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]): """Minimal error function stub""" return np.array([1, 0, 0]), H @@ -31,7 +34,8 @@ class TestCustomFactor(GtsamTestCase): def test_new_keylist(self): """Test the creation of a new CustomFactor""" - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]): """Minimal error function stub""" return np.array([1, 0, 0]) @@ -42,7 +46,8 @@ class TestCustomFactor(GtsamTestCase): """Test if calling the 
factor works (only error)""" expected_pose = Pose2(1, 1, 0) - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray: + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]) -> np.ndarray: """Minimal error function with no Jacobian""" key0 = this.keys()[0] error = -v.atPose2(key0).localCoordinates(expected_pose) @@ -64,7 +69,8 @@ class TestCustomFactor(GtsamTestCase): expected = Pose2(2, 2, np.pi / 2) - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]): """ the custom error function. One can freely use variables captured from the outside scope. Or the variables can be acquired by indexing `v`. @@ -103,7 +109,8 @@ class TestCustomFactor(GtsamTestCase): gT1 = Pose2(1, 2, np.pi / 2) gT2 = Pose2(-1, 4, np.pi) - def error_func(this: CustomFactor, v: gtsam.Values, _: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + _: List[np.ndarray]): """Minimal error function stub""" return np.array([1, 0, 0]) @@ -124,7 +131,8 @@ class TestCustomFactor(GtsamTestCase): expected = Pose2(2, 2, np.pi / 2) - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]): """ Error function that mimics a BetweenFactor :param this: reference to the current CustomFactor being evaluated @@ -137,7 +145,8 @@ class TestCustomFactor(GtsamTestCase): gT1, gT2 = v.atPose2(key0), v.atPose2(key1) error = expected.localCoordinates(gT1.between(gT2)) - self.assertTrue(H is None) # Should be true if we only request the error + self.assertTrue( + H is None) # Should be true if we only request the error if H is not None: result = gT1.between(gT2) @@ -164,7 +173,8 @@ class TestCustomFactor(GtsamTestCase): expected = Pose2(2, 2, np.pi / 2) - def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]): + def error_func(this: CustomFactor, v: gtsam.Values, + H: List[np.ndarray]): """ Error function that mimics a BetweenFactor :param this: reference to the current CustomFactor being evaluated diff --git a/python/gtsam/utils/plot.py b/python/gtsam/utils/plot.py index 880c436e8..fe067e787 100644 --- a/python/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -12,7 +12,6 @@ from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import import gtsam from gtsam import Marginals, Point2, Point3, Pose2, Pose3, Values - # For translation between a scaling of the uncertainty ellipse and the # percentage of inliers see discussion in # [PR 1067](https://github.com/borglab/gtsam/pull/1067) @@ -557,7 +556,7 @@ def plot_incremental_trajectory(fignum: int, axes = fig.axes[0] poses = gtsam.utilities.allPose3s(values) - keys = gtsam.KeyVector(poses.keys()) + keys = poses.keys() for key in keys[start:]: if values.exists(key): From 87d56aff9c49e6157648af528c81debe757bd5b1 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 15 Jun 2023 17:51:34 -0400 Subject: [PATCH 55/78] re-add classes for Matlab wrapper --- gtsam/geometry/Rot3.h | 3 ++ gtsam/geometry/geometry.i | 61 +++++++++++++++++++++++++++++++- gtsam/gtsam.i | 35 ++++++++++++++++++ gtsam/nonlinear/CustomFactor.cpp | 2 +- gtsam/sfm/sfm.i | 34 ++++++++++++++++++ gtsam/slam/slam.i | 21 ++++++++++- 6 files changed, 153 insertions(+), 3 deletions(-) diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 1a5a1ed96..2b9c5a45a 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -560,6 +560,9 
@@ class GTSAM_EXPORT Rot3 : public LieGroup { #endif }; + /// std::vector of Rot3s, used in Matlab wrapper + using Rot3Vector = std::vector>; + /** * [RQ] receives a 3 by 3 matrix and returns an upper triangular matrix R * and 3 rotation angles corresponding to the rotation matrix Q=Qz'*Qy'*Qx' diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index c28d8dc56..4d265fca2 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -28,7 +28,41 @@ class Point2 { // enabling serialization functionality void serialize() const; }; - + +// Used in Matlab wrapper +class Point2Pairs { + Point2Pairs(); + size_t size() const; + bool empty() const; + gtsam::Point2Pair at(size_t n) const; + void push_back(const gtsam::Point2Pair& point_pair); +}; + +// std::vector +// Used in Matlab wrapper +class Point2Vector { + // Constructors + Point2Vector(); + Point2Vector(const gtsam::Point2Vector& v); + + // Capacity + size_t size() const; + size_t max_size() const; + void resize(size_t sz); + size_t capacity() const; + bool empty() const; + void reserve(size_t n); + + // Element access + gtsam::Point2 at(size_t n) const; + gtsam::Point2 front() const; + gtsam::Point2 back() const; + + // Modifiers + void assign(size_t n, const gtsam::Point2& u); + void push_back(const gtsam::Point2& x); + void pop_back(); +}; #include class StereoPoint2 { @@ -99,6 +133,14 @@ class Point3 { gtsam::Point3 normalize(const gtsam::Point3 &p, Eigen::Ref H) const; }; +// Used in Matlab wrapper +class Point3Pairs { + Point3Pairs(); + size_t size() const; + bool empty() const; + gtsam::Point3Pair at(size_t n) const; + void push_back(const gtsam::Point3Pair& point_pair); +}; #include class Rot2 { @@ -503,6 +545,23 @@ class Pose3 { void serialize() const; }; +// Used in Matlab wrapper +class Pose3Pairs { + Pose3Pairs(); + size_t size() const; + bool empty() const; + gtsam::Pose3Pair at(size_t n) const; + void push_back(const gtsam::Pose3Pair& pose_pair); +}; + +// Used in Matlab wrapper +class Pose3Vector { + Pose3Vector(); + size_t size() const; + bool empty() const; + gtsam::Pose3 at(size_t n) const; + void push_back(const gtsam::Pose3& pose); +}; #include class Unit3 { diff --git a/gtsam/gtsam.i b/gtsam/gtsam.i index 3e18168e3..834d5a147 100644 --- a/gtsam/gtsam.i +++ b/gtsam/gtsam.i @@ -104,6 +104,41 @@ class KeyGroupMap { bool insert2(size_t key, int val); }; +// Actually a FastSet +// Used in Matlab wrapper +class FactorIndexSet { + FactorIndexSet(); + FactorIndexSet(const gtsam::FactorIndexSet& set); + + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + void insert(size_t factorIndex); + bool erase(size_t factorIndex); // returns true if value was removed + bool count(size_t factorIndex) const; // returns true if value exists +}; + +// Actually a vector +// Used in Matlab wrapper +class FactorIndices { + FactorIndices(); + FactorIndices(const gtsam::FactorIndices& other); + + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + size_t at(size_t i) const; + size_t front() const; + size_t back() const; + void push_back(size_t factorIndex) const; +}; + //************************************************************************* // Utilities //************************************************************************* diff --git a/gtsam/nonlinear/CustomFactor.cpp b/gtsam/nonlinear/CustomFactor.cpp index 52f49574c..a6415df20 100644 --- a/gtsam/nonlinear/CustomFactor.cpp +++ 
b/gtsam/nonlinear/CustomFactor.cpp @@ -28,7 +28,7 @@ Vector CustomFactor::unwhitenedError(const Values& x, OptionalMatrixVecType H) c if(H) { /* * In this case, we pass the raw pointer to the `std::vector` object directly to pybind. - * As the type `std::vector` has been marked as opaque in `preamble/base.h`, any changes in + * As the type `std::vector` has been marked as opaque in `preamble/custom.h`, any changes in * Python will be immediately reflected on the C++ side. * * Example: diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index 059aa21ed..2198692e8 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -99,6 +99,30 @@ typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; typedef gtsam::BinaryMeasurement BinaryMeasurementPoint3; +// Used in Matlab wrapper +class BinaryMeasurementsUnit3 { + BinaryMeasurementsUnit3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + +// Used in Matlab wrapper +class BinaryMeasurementsPoint3 { + BinaryMeasurementsPoint3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + +// Used in Matlab wrapper +class BinaryMeasurementsRot3 { + BinaryMeasurementsRot3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + #include #include @@ -246,6 +270,16 @@ class ShonanAveraging3 { #include +// Used in Matlab wrapper +class KeyPairDoubleMap { + KeyPairDoubleMap(); + KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other); + + size_t size() const; + bool empty() const; + void clear(); + size_t at(const pair& keypair) const; +}; class MFAS { MFAS(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i index dfdd63aa3..d35a87eee 100644 --- a/gtsam/slam/slam.i +++ b/gtsam/slam/slam.i @@ -250,6 +250,25 @@ void save2D(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config, gtsam::noiseModel::Diagonal* model, string filename); +// std::vector::shared_ptr> +// Used in Matlab wrapper +class BetweenFactorPose2s { + BetweenFactorPose2s(); + size_t size() const; + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); +}; +gtsam::BetweenFactorPose2s parse2DFactors(string filename); + +// std::vector::shared_ptr> +// Used in Matlab wrapper +class BetweenFactorPose3s { + BetweenFactorPose3s(); + size_t size() const; + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); +}; + gtsam::BetweenFactorPose3s parse3DFactors(string filename); pair load3D(string filename); @@ -288,7 +307,7 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor { KarcherMeanFactor(const gtsam::KeyVector& keys); }; -gtsam::Rot3 FindKarcherMean(const std::vector& rotations); +gtsam::Rot3 FindKarcherMean(const gtsam::Rot3Vector& rotations); #include gtsam::noiseModel::Isotropic* ConvertNoiseModel(gtsam::noiseModel::Base* model, From e70f8af448a3aaf214c5eb66f72fbf045db704ed Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 15 Jun 2023 17:55:33 -0400 Subject: [PATCH 56/78] leftover classes to re-add --- gtsam/base/base.i | 40 ++++++++++++++++++++++++++++++++++++++++ python/CMakeLists.txt | 1 + 2 files changed, 41 insertions(+) diff --git a/gtsam/base/base.i b/gtsam/base/base.i index d8cf9d8ea..31d09cb3d 100644 --- 
a/gtsam/base/base.i +++ b/gtsam/base/base.i @@ -41,9 +41,49 @@ class DSFMap { std::map sets(); }; +// Used in Matlab wrapper +class IndexPairSet { + IndexPairSet(); + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + void insert(gtsam::IndexPair key); + bool erase(gtsam::IndexPair key); // returns true if value was removed + bool count(gtsam::IndexPair key) const; // returns true if value exists +}; + +// Used in Matlab wrapper +class IndexPairVector { + IndexPairVector(); + IndexPairVector(const gtsam::IndexPairVector& other); + + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + gtsam::IndexPair at(size_t i) const; + void push_back(gtsam::IndexPair key) const; +}; gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set); +// Used in Matlab wrapper +class IndexPairSetMap { + IndexPairSetMap(); + // common STL methods + size_t size() const; + bool empty() const; + void clear(); + + // structure specific methods + gtsam::IndexPairSet at(gtsam::IndexPair& key); +}; + #include #include bool linear_independent(Matrix A, Matrix B, double tol); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 684cf31a1..2b2abf507 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -48,6 +48,7 @@ set(ignore gtsam::ISAM2ThresholdMapValue gtsam::FactorIndices gtsam::FactorIndexSet + gtsam::IndexPairSet gtsam::IndexPairSetMap gtsam::IndexPairVector gtsam::BetweenFactorPose2s From a763944249fbb9b2b5c3aadb53a38b2441d5868e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 15 Jun 2023 18:55:05 -0400 Subject: [PATCH 57/78] handle some TODOs --- gtsam/nonlinear/GncParams.h | 3 +-- gtsam/nonlinear/nonlinear.i | 8 ++++---- gtsam/sfm/sfm.i | 5 ++--- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/gtsam/nonlinear/GncParams.h b/gtsam/nonlinear/GncParams.h index 10ac80663..d05e72ee2 100644 --- a/gtsam/nonlinear/GncParams.h +++ b/gtsam/nonlinear/GncParams.h @@ -73,8 +73,7 @@ class GncParams { double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS) Verbosity verbosity = SILENT; ///< Verbosity level - //TODO(Varun) replace IndexVector with vector once pybind11/stl.h is globally enabled. 
- /// Use IndexVector for inliers and outliers since it is fast + wrapping + /// Use IndexVector for inliers and outliers since it is fast using IndexVector = FastVector; ///< Slots in the factor graph corresponding to measurements that we know are inliers IndexVector knownInliers = IndexVector(); diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 19f4ae588..b47fd8a4c 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -305,8 +305,8 @@ virtual class GncParams { double relativeCostTol; double weightsTol; gtsam::This::Verbosity verbosity; - gtsam::KeyVector knownInliers; - gtsam::KeyVector knownOutliers; + gtsam::This::IndexVector knownInliers; + gtsam::This::IndexVector knownOutliers; void setLossType(const gtsam::GncLossType type); void setMaxIterations(const size_t maxIter); @@ -314,8 +314,8 @@ virtual class GncParams { void setRelativeCostTol(double value); void setWeightsTol(double value); void setVerbosityGNC(const gtsam::This::Verbosity value); - void setKnownInliers(const gtsam::KeyVector& knownIn); - void setKnownOutliers(const gtsam::KeyVector& knownOut); + void setKnownInliers(const gtsam::This::IndexVector& knownIn); + void setKnownOutliers(const gtsam::This::IndexVector& knownOut); void print(const string& str = "GncParams: ") const; enum Verbosity { diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index 2198692e8..a9deaa895 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -225,10 +225,9 @@ class ShonanAveraging3 { ShonanAveraging3(string g2oFile, const gtsam::ShonanAveragingParameters3& parameters); - // TODO(frank): deprecate once we land pybind wrapper - ShonanAveraging3(const gtsam::BetweenFactorPose3s& factors); ShonanAveraging3(const gtsam::BetweenFactorPose3s& factors, - const gtsam::ShonanAveragingParameters3& parameters); + const gtsam::ShonanAveragingParameters3& parameters = + gtsam::ShonanAveragingParameters3()); // Query properties size_t nrUnknowns() const; From 2340f1aa09a9db047819329b0457ca616e842bbb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 16 Jun 2023 05:53:50 -0400 Subject: [PATCH 58/78] uncomment test --- python/gtsam/tests/test_DsfTrackGenerator.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/python/gtsam/tests/test_DsfTrackGenerator.py b/python/gtsam/tests/test_DsfTrackGenerator.py index 282b384ec..be6aa0796 100644 --- a/python/gtsam/tests/test_DsfTrackGenerator.py +++ b/python/gtsam/tests/test_DsfTrackGenerator.py @@ -81,16 +81,16 @@ class TestDsfTrackGenerator(GtsamTestCase): np.testing.assert_allclose(track2.indexVector(), [1, 2]) -# class TestSfmTrack2d(GtsamTestCase): -# """Tests for SfmTrack2d.""" +class TestSfmTrack2d(GtsamTestCase): + """Tests for SfmTrack2d.""" -# def test_sfm_track_2d_constructor(self) -> None: -# """Test construction of 2D SfM track.""" -# measurements = [] -# measurements.append((0, Point2(10, 20))) -# track = SfmTrack2d(measurements=measurements) -# track.measurement(0) -# assert track.numberMeasurements() == 1 + def test_sfm_track_2d_constructor(self) -> None: + """Test construction of 2D SfM track.""" + measurements = [] + measurements.append((0, Point2(10, 20))) + track = SfmTrack2d(measurements=measurements) + track.measurement(0) + assert track.numberMeasurements() == 1 if __name__ == "__main__": From 80a052d048211f9f369ca9602db340508ec58b78 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 16 Jun 2023 10:38:31 -0400 Subject: [PATCH 59/78] fix templating TODOs --- gtsam/geometry/geometry.i | 4 ++-- 
gtsam/inference/inference.i | 10 +++------ gtsam/sfm/sfm.i | 43 +++++++++---------------------------- timing/timeTest.cpp | 1 - 4 files changed, 15 insertions(+), 43 deletions(-) diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 60d6a2bed..46172b774 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -1181,8 +1181,8 @@ class TriangulationParameters { const gtsam::SharedNoiseModel& noiseModel = nullptr); }; -// Templates appear not yet supported for free functions - issue raised at -// borglab/wrap#14 to add support +// Can be templated but overloaded for convenience. +// We can now call `triangulatePoint3` with any template type. // Cal3_S2 versions gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, diff --git a/gtsam/inference/inference.i b/gtsam/inference/inference.i index d4c90e356..b5413c747 100644 --- a/gtsam/inference/inference.i +++ b/gtsam/inference/inference.i @@ -176,13 +176,9 @@ class DotWriter { class VariableIndex { // Standard Constructors and Named Constructors VariableIndex(); - // TODO: Templetize constructor when wrap supports it - // template - // VariableIndex(const T& factorGraph, size_t nVariables); - // VariableIndex(const T& factorGraph); - VariableIndex(const gtsam::SymbolicFactorGraph& sfg); - VariableIndex(const gtsam::GaussianFactorGraph& gfg); - VariableIndex(const gtsam::NonlinearFactorGraph& fg); + template + VariableIndex(const T& factorGraph); VariableIndex(const gtsam::VariableIndex& other); // Testable diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index a9deaa895..930a0dd46 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -126,39 +126,16 @@ class BinaryMeasurementsRot3 { #include #include -// TODO(frank): copy/pasta below until we have integer template parameters in -// wrap! - -class ShonanAveragingParameters2 { - ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); - ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, +template +class ShonanAveragingParameters { + ShonanAveragingParameters(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters(const gtsam::LevenbergMarquardtParams& lm, string method); gtsam::LevenbergMarquardtParams getLMParams() const; void setOptimalityThreshold(double value); double getOptimalityThreshold() const; - void setAnchor(size_t index, const gtsam::Rot2& value); - pair getAnchor(); - void setAnchorWeight(double value); - double getAnchorWeight() const; - void setKarcherWeight(double value); - double getKarcherWeight() const; - void setGaugesWeight(double value); - double getGaugesWeight() const; - void setUseHuber(bool value); - bool getUseHuber() const; - void setCertifyOptimality(bool value); - bool getCertifyOptimality() const; -}; - -class ShonanAveragingParameters3 { - ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); - ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, - string method); - gtsam::LevenbergMarquardtParams getLMParams() const; - void setOptimalityThreshold(double value); - double getOptimalityThreshold() const; - void setAnchor(size_t index, const gtsam::Rot3& value); - pair getAnchor(); + void setAnchor(size_t index, const gtsam::This::Rot& value); + pair getAnchor(); void setAnchorWeight(double value); double getAnchorWeight() const; void setKarcherWeight(double value); @@ -171,6 +148,7 @@ class ShonanAveragingParameters3 { bool getCertifyOptimality() const; }; +// NOTE(Varun): Not templated because each class has specializations defined. 
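// --- Illustrative sketch, not taken from this patch: using the ShonanAveraging3
// --- constructor with the now-defaulted parameter argument declared above.
// --- The g2o filename is made up; initializeRandomly()/run() are assumed to be
// --- the usual ShonanAveraging interface.
#include <gtsam/sfm/ShonanAveraging.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>

void runShonan3() {
  gtsam::ShonanAveragingParameters3 params(
      gtsam::LevenbergMarquardtParams::CeresDefaults());
  gtsam::ShonanAveraging3 shonan("pose3_measurements.g2o", params);
  gtsam::Values initial = shonan.initializeRandomly();
  // Solves rotation averaging over SO(3), returning the estimate and its cost.
  const auto [rotations, cost] = shonan.run(initial);
}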
class ShonanAveraging2 { ShonanAveraging2(string g2oFile); ShonanAveraging2(string g2oFile, @@ -217,10 +195,9 @@ class ShonanAveraging2 { }; class ShonanAveraging3 { - ShonanAveraging3( - const std::vector>& measurements, - const gtsam::ShonanAveragingParameters3& parameters = - gtsam::ShonanAveragingParameters3()); + ShonanAveraging3(const gtsam::This::Measurements& measurements, + const gtsam::ShonanAveragingParameters3& parameters = + gtsam::ShonanAveragingParameters3()); ShonanAveraging3(string g2oFile); ShonanAveraging3(string g2oFile, const gtsam::ShonanAveragingParameters3& parameters); diff --git a/timing/timeTest.cpp b/timing/timeTest.cpp index 9da3cfb2e..c450b9cf0 100644 --- a/timing/timeTest.cpp +++ b/timing/timeTest.cpp @@ -21,7 +21,6 @@ using namespace gtsam; int main(int argc, char *argv[]) { - // FIXME: ticPush_ does not exist { gttic_(top1); gttic_(sub1); From 87402328bf2eae8a2b482cb41a14d1061ec55390 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 19 Jun 2023 15:47:18 -0400 Subject: [PATCH 60/78] remove template from ParameterMatrix --- gtsam/basis/Basis.h | 30 +++++----- gtsam/basis/BasisFactors.h | 24 ++++---- gtsam/basis/ParameterMatrix.h | 70 ++++++++++------------- gtsam/basis/basis.i | 3 +- gtsam/basis/tests/testBasisFactors.cpp | 51 ++++++++--------- gtsam/basis/tests/testChebyshev2.cpp | 34 +++++------ gtsam/basis/tests/testFourier.cpp | 2 +- gtsam/basis/tests/testParameterMatrix.cpp | 38 ++++++------ gtsam/nonlinear/values.i | 64 ++------------------- 9 files changed, 125 insertions(+), 191 deletions(-) diff --git a/gtsam/basis/Basis.h b/gtsam/basis/Basis.h index 97704dab4..2963b99a8 100644 --- a/gtsam/basis/Basis.h +++ b/gtsam/basis/Basis.h @@ -169,7 +169,7 @@ class Basis { }; /** - * VectorEvaluationFunctor at a given x, applied to ParameterMatrix. + * VectorEvaluationFunctor at a given x, applied to ParameterMatrix. * This functor is used to evaluate a parameterized function at a given scalar * value x. When given a specific M*N parameters, returns an M-vector the M * corresponding functions at x, possibly with Jacobians wrpt the parameters. @@ -211,14 +211,14 @@ class Basis { } /// M-dimensional evaluation - VectorM apply(const ParameterMatrix& P, + VectorM apply(const ParameterMatrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - VectorM operator()(const ParameterMatrix& P, + VectorM operator()(const ParameterMatrix& P, OptionalJacobian H = {}) const { return apply(P, H); } @@ -270,21 +270,21 @@ class Basis { } /// Calculate component of component rowIndex_ of P - double apply(const ParameterMatrix& P, + double apply(const ParameterMatrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose(); } /// c++ sugar - double operator()(const ParameterMatrix& P, + double operator()(const ParameterMatrix& P, OptionalJacobian H = {}) const { return apply(P, H); } }; /** - * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix. + * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix. * This functor is used to evaluate a parameterized function at a given scalar * value x. When given a specific M*N parameters, returns an M-vector the M * corresponding functions at x, possibly with Jacobians wrpt the parameters. 
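// --- Illustrative sketch, not taken from this patch: evaluating an M-dimensional
// --- function through the functor above with the now size-erased ParameterMatrix.
// --- As of this patch the functor itself is still templated on M; a later patch
// --- in this series moves M to a run-time argument. Values are made up.
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/ParameterMatrix.h>

void evaluateVectorFit() {
  const size_t N = 4;                                 // four Chebyshev points on [0, 1]
  gtsam::ParameterMatrix P(2, N);                     // 2 components x N points, zero-initialized
  P.row(0) = gtsam::Chebyshev2::Points(N, 0.0, 1.0);  // slope-1 ramp in the first component
  gtsam::Chebyshev2::VectorEvaluationFunctor<2> fx(N, 0.5, 0.0, 1.0);
  gtsam::Vector2 value = fx(P);                       // interpolated 2-vector at x = 0.5
}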
@@ -314,7 +314,7 @@ class Basis { : Base(N, x, a, b) {} /// Manifold evaluation - T apply(const ParameterMatrix& P, + T apply(const ParameterMatrix& P, OptionalJacobian H = {}) const { // Interpolate the M-dimensional vector to yield a vector in tangent space Eigen::Matrix xi = Base::operator()(P, H); @@ -333,7 +333,7 @@ class Basis { } /// c++ sugar - T operator()(const ParameterMatrix& P, + T operator()(const ParameterMatrix& P, OptionalJacobian H = {}) const { return apply(P, H); // might call apply in derived } @@ -389,7 +389,7 @@ class Basis { }; /** - * VectorDerivativeFunctor at a given x, applied to ParameterMatrix. + * VectorDerivativeFunctor at a given x, applied to ParameterMatrix. * * This functor is used to evaluate the derivatives of a parameterized * function at a given scalar value x. When given a specific M*N parameters, @@ -432,15 +432,14 @@ class Basis { calculateJacobian(); } - VectorM apply(const ParameterMatrix& P, + VectorM apply(const ParameterMatrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - VectorM operator()( - const ParameterMatrix& P, - OptionalJacobian H = {}) const { + VectorM operator()(const ParameterMatrix& P, + OptionalJacobian H = {}) const { return apply(P, H); } }; @@ -490,18 +489,17 @@ class Basis { calculateJacobian(N); } /// Calculate derivative of component rowIndex_ of F - double apply(const ParameterMatrix& P, + double apply(const ParameterMatrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.row(rowIndex_) * this->weights_.transpose(); } /// c++ sugar - double operator()(const ParameterMatrix& P, + double operator()(const ParameterMatrix& P, OptionalJacobian H = {}) const { return apply(P, H); } }; - }; } // namespace gtsam diff --git a/gtsam/basis/BasisFactors.h b/gtsam/basis/BasisFactors.h index 0e42a8866..496d7094c 100644 --- a/gtsam/basis/BasisFactors.h +++ b/gtsam/basis/BasisFactors.h @@ -93,9 +93,9 @@ class EvaluationFactor : public FunctorizedFactor { */ template class VectorEvaluationFactor - : public FunctorizedFactor> { + : public FunctorizedFactor { private: - using Base = FunctorizedFactor>; + using Base = FunctorizedFactor; public: VectorEvaluationFactor() {} @@ -156,11 +156,12 @@ class VectorEvaluationFactor * * @ingroup basis */ +// TODO(Varun) remove template P template class VectorComponentFactor - : public FunctorizedFactor> { + : public FunctorizedFactor { private: - using Base = FunctorizedFactor>; + using Base = FunctorizedFactor; public: VectorComponentFactor() {} @@ -226,10 +227,9 @@ class VectorComponentFactor * where `x` is the value (e.g. timestep) at which the rotation was evaluated. */ template -class ManifoldEvaluationFactor - : public FunctorizedFactor::dimension>> { +class ManifoldEvaluationFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor::dimension>>; + using Base = FunctorizedFactor; public: ManifoldEvaluationFactor() {} @@ -326,11 +326,12 @@ class DerivativeFactor * @param BASIS: The basis class to use e.g. Chebyshev2 * @param M: Size of the evaluated state vector derivative. */ +//TODO(Varun) remove template M template class VectorDerivativeFactor - : public FunctorizedFactor> { + : public FunctorizedFactor { private: - using Base = FunctorizedFactor>; + using Base = FunctorizedFactor; using Func = typename BASIS::template VectorDerivativeFunctor; public: @@ -379,11 +380,12 @@ class VectorDerivativeFactor * @param BASIS: The basis class to use e.g. 
Chebyshev2 * @param P: Size of the control component derivative. */ +// TODO(Varun) remove template P template class ComponentDerivativeFactor - : public FunctorizedFactor> { + : public FunctorizedFactor { private: - using Base = FunctorizedFactor>; + using Base = FunctorizedFactor; using Func = typename BASIS::template ComponentDerivativeFunctor
<P>
; public: diff --git a/gtsam/basis/ParameterMatrix.h b/gtsam/basis/ParameterMatrix.h index 81686e046..bfb26c4de 100644 --- a/gtsam/basis/ParameterMatrix.h +++ b/gtsam/basis/ParameterMatrix.h @@ -30,16 +30,12 @@ namespace gtsam { /** * A matrix abstraction of MxN values at the Basis points. * This class serves as a wrapper over an Eigen matrix. - * @tparam M: The dimension of the type you wish to evaluate. * @param N: the number of Basis points (e.g. Chebyshev points of the second * kind). */ -template class ParameterMatrix { - using MatrixType = Eigen::Matrix; - private: - MatrixType matrix_; + Matrix matrix_; public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW @@ -48,15 +44,18 @@ class ParameterMatrix { /** * Create ParameterMatrix using the number of basis points. + * @param M: The dimension size of the type you wish to evaluate. * @param N: The number of basis points (the columns). */ - ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); } + ParameterMatrix(const size_t M, const size_t N) : matrix_(M, N) { + matrix_.setZero(); + } /** * Create ParameterMatrix from an MxN Eigen Matrix. * @param matrix: An Eigen matrix used to initialze the ParameterMatrix. */ - ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {} + ParameterMatrix(const Matrix& matrix) : matrix_(matrix) {} /// Get the number of rows. size_t rows() const { return matrix_.rows(); } @@ -65,10 +64,10 @@ class ParameterMatrix { size_t cols() const { return matrix_.cols(); } /// Get the underlying matrix. - MatrixType matrix() const { return matrix_; } + Matrix matrix() const { return matrix_; } /// Return the tranpose of the underlying matrix. - Eigen::Matrix transpose() const { return matrix_.transpose(); } + Matrix transpose() const { return matrix_.transpose(); } /** * Get the matrix row specified by `index`. @@ -82,7 +81,7 @@ class ParameterMatrix { * Set the matrix row specified by `index`. * @param index: The row index to set. */ - auto row(size_t index) -> Eigen::Block { + auto row(size_t index) -> Eigen::Block { return matrix_.row(index); } @@ -90,7 +89,7 @@ class ParameterMatrix { * Get the matrix column specified by `index`. * @param index: The column index to retrieve. */ - Eigen::Matrix col(size_t index) const { + Eigen::Matrix col(size_t index) const { return matrix_.col(index); } @@ -98,7 +97,7 @@ class ParameterMatrix { * Set the matrix column specified by `index`. * @param index: The column index to set. */ - auto col(size_t index) -> Eigen::Block { + auto col(size_t index) -> Eigen::Block { return matrix_.col(index); } @@ -111,37 +110,35 @@ class ParameterMatrix { * Add a ParameterMatrix to another. * @param other: ParameterMatrix to add. */ - ParameterMatrix operator+(const ParameterMatrix& other) const { - return ParameterMatrix(matrix_ + other.matrix()); + ParameterMatrix operator+(const ParameterMatrix& other) const { + return ParameterMatrix(matrix_ + other.matrix()); } /** * Add a MxN-sized vector to the ParameterMatrix. * @param other: Vector which is reshaped and added. */ - ParameterMatrix operator+( - const Eigen::Matrix& other) const { + ParameterMatrix operator+(const Eigen::Matrix& other) const { // This form avoids a deep copy and instead typecasts `other`. - Eigen::Map other_(other.data(), M, cols()); - return ParameterMatrix(matrix_ + other_); + Eigen::Map other_(other.data(), rows(), cols()); + return ParameterMatrix(matrix_ + other_); } /** * Subtract a ParameterMatrix from another. * @param other: ParameterMatrix to subtract. 
*/ - ParameterMatrix operator-(const ParameterMatrix& other) const { - return ParameterMatrix(matrix_ - other.matrix()); + ParameterMatrix operator-(const ParameterMatrix& other) const { + return ParameterMatrix(matrix_ - other.matrix()); } /** * Subtract a MxN-sized vector from the ParameterMatrix. * @param other: Vector which is reshaped and subracted. */ - ParameterMatrix operator-( - const Eigen::Matrix& other) const { - Eigen::Map other_(other.data(), M, cols()); - return ParameterMatrix(matrix_ - other_); + ParameterMatrix operator-(const Eigen::Matrix& other) const { + Eigen::Map other_(other.data(), rows(), cols()); + return ParameterMatrix(matrix_ - other_); } /** @@ -149,9 +146,7 @@ class ParameterMatrix { * @param other: Eigen matrix which should be multiplication compatible with * the ParameterMatrix. */ - MatrixType operator*(const Eigen::Matrix& other) const { - return matrix_ * other; - } + Matrix operator*(const Matrix& other) const { return matrix_ * other; } /// @name Vector Space requirements /// @{ @@ -169,7 +164,7 @@ class ParameterMatrix { * @param other: The ParameterMatrix to check equality with. * @param tol: The absolute tolerance threshold. */ - bool equals(const ParameterMatrix& other, double tol = 1e-8) const { + bool equals(const ParameterMatrix& other, double tol = 1e-8) const { return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol); } @@ -189,27 +184,24 @@ class ParameterMatrix { * NOTE: The size at compile time is unknown so this identity is zero * length and thus not valid. */ - inline static ParameterMatrix Identity() { - // throw std::runtime_error( - // "ParameterMatrix::Identity(): Don't use this function"); - return ParameterMatrix(0); + inline static ParameterMatrix Identity(size_t M = 0, size_t N = 0) { + return ParameterMatrix(M, N); } /// @} }; -// traits for ParameterMatrix -template -struct traits> - : public internal::VectorSpace> {}; +/// traits for ParameterMatrix +template <> +struct traits : public internal::VectorSpace { +}; /* ************************************************************************* */ // Stream operator that takes a ParameterMatrix. Used for printing. 
-template inline std::ostream& operator<<(std::ostream& os, - const ParameterMatrix& parameterMatrix) { + const ParameterMatrix& parameterMatrix) { os << parameterMatrix.matrix(); return os; } -} // namespace gtsam \ No newline at end of file +} // namespace gtsam diff --git a/gtsam/basis/basis.i b/gtsam/basis/basis.i index 5ff4e777c..61ba1eada 100644 --- a/gtsam/basis/basis.i +++ b/gtsam/basis/basis.i @@ -48,9 +48,8 @@ class Chebyshev2 { #include -template class ParameterMatrix { - ParameterMatrix(const size_t N); + ParameterMatrix(const size_t M, const size_t N); ParameterMatrix(const Matrix& matrix); Matrix matrix() const; diff --git a/gtsam/basis/tests/testBasisFactors.cpp b/gtsam/basis/tests/testBasisFactors.cpp index 703830b8d..c2552ac11 100644 --- a/gtsam/basis/tests/testBasisFactors.cpp +++ b/gtsam/basis/tests/testBasisFactors.cpp @@ -17,30 +17,29 @@ * @brief unit tests for factors in BasisFactors.h */ +#include +#include +#include +#include #include #include #include #include +#include #include #include #include -#include -#include -#include -#include -#include - -using gtsam::noiseModel::Isotropic; -using gtsam::Pose2; -using gtsam::Vector; -using gtsam::Values; using gtsam::Chebyshev2; -using gtsam::ParameterMatrix; -using gtsam::LevenbergMarquardtParams; using gtsam::LevenbergMarquardtOptimizer; +using gtsam::LevenbergMarquardtParams; using gtsam::NonlinearFactorGraph; using gtsam::NonlinearOptimizerParams; +using gtsam::ParameterMatrix; +using gtsam::Pose2; +using gtsam::Values; +using gtsam::Vector; +using gtsam::noiseModel::Isotropic; constexpr size_t N = 2; @@ -86,10 +85,10 @@ TEST(BasisFactors, VectorEvaluationFactor) { NonlinearFactorGraph graph; graph.add(factor); - ParameterMatrix stateMatrix(N); + ParameterMatrix stateMatrix(M, N); Values initial; - initial.insert>(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -128,16 +127,16 @@ TEST(BasisFactors, VectorComponentFactor) { const size_t i = 2; const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0; auto model = Isotropic::Sigma(1, 1.0); - VectorComponentFactor factor(key, measured, model, N, i, - t, a, b); + VectorComponentFactor factor(key, measured, model, N, i, t, a, + b); NonlinearFactorGraph graph; graph.add(factor); - ParameterMatrix
<P>
stateMatrix(N); + ParameterMatrix stateMatrix(P, N); Values initial; - initial.insert>(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -153,16 +152,16 @@ TEST(BasisFactors, ManifoldEvaluationFactor) { const Pose2 measured; const double t = 3.0, a = 2.0, b = 4.0; auto model = Isotropic::Sigma(3, 1.0); - ManifoldEvaluationFactor factor(key, measured, model, N, - t, a, b); + ManifoldEvaluationFactor factor(key, measured, model, N, t, + a, b); NonlinearFactorGraph graph; graph.add(factor); - ParameterMatrix<3> stateMatrix(N); + ParameterMatrix stateMatrix(3, N); Values initial; - initial.insert>(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -186,10 +185,10 @@ TEST(BasisFactors, VecDerivativePrior) { NonlinearFactorGraph graph; graph.add(vecDPrior); - ParameterMatrix stateMatrix(N); + ParameterMatrix stateMatrix(M, N); Values initial; - initial.insert>(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -213,8 +212,8 @@ TEST(BasisFactors, ComponentDerivativeFactor) { graph.add(controlDPrior); Values initial; - ParameterMatrix stateMatrix(N); - initial.insert>(key, stateMatrix); + ParameterMatrix stateMatrix(M, N); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index e51436fcf..abf98e8e4 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -108,7 +108,7 @@ TEST(Chebyshev2, InterpolateVector) { double t = 30, a = 0, b = 100; const size_t N = 3; // Create 2x3 matrix with Vectors at Chebyshev points - ParameterMatrix<2> X(N); + ParameterMatrix X(2, N); X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp // Check value @@ -120,11 +120,11 @@ TEST(Chebyshev2, InterpolateVector) { EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative - std::function)> f = + std::function f = std::bind(&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11, 2 * N>(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); } @@ -133,7 +133,7 @@ TEST(Chebyshev2, InterpolateVector) { TEST(Chebyshev2, InterpolatePose2) { double t = 30, a = 0, b = 100; - ParameterMatrix<3> X(N); + ParameterMatrix X(3, N); X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp X.row(1) = Vector::Zero(N); X.row(2) = 0.1 * Vector::Ones(N); @@ -148,11 +148,11 @@ TEST(Chebyshev2, InterpolatePose2) { EXPECT(assert_equal(expected, fx(X, actualH))); // Check derivative - std::function)> f = + std::function f = std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11, 3 * N>(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); } @@ -169,7 +169,7 @@ TEST(Chebyshev2, InterpolatePose3) { Vector6 xi = Pose3::ChartAtOrigin::Local(pose); Eigen::Matrix actualH(6, 6 * N); - ParameterMatrix<6> X(N); + ParameterMatrix X(6, N); X.col(11) = xi; Chebyshev2::ManifoldEvaluationFunctor fx(N, t, a, b); @@ -178,11 +178,11 @@ TEST(Chebyshev2, InterpolatePose3) { EXPECT(assert_equal(expected, fx(X, actualH))); // Check derivative - std::function)> f = + std::function f = 
std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11, 6 * N>(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-8)); } #endif @@ -415,12 +415,12 @@ TEST(Chebyshev2, VectorDerivativeFunctor) { const double x = 0.2; using VecD = Chebyshev2::VectorDerivativeFunctor; VecD fx(N, x, 0, 3); - ParameterMatrix X(N); + ParameterMatrix X(M, N); Matrix actualH(M, M * N); EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8)); // Test Jacobian - Matrix expectedH = numericalDerivative11, M * N>( + Matrix expectedH = numericalDerivative11( std::bind(&VecD::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } @@ -433,12 +433,12 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { const Vector points = Chebyshev2::Points(N, 0, T); - // Assign the parameter matrix - Vector values(N); + // Assign the parameter matrix 1xN + Matrix values(1, N); for (size_t i = 0; i < N; ++i) { values(i) = f(points(i)); } - ParameterMatrix X(values); + ParameterMatrix X(values); // Evaluate the derivative at the chebyshev points using // VectorDerivativeFunctor. @@ -452,7 +452,7 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { Matrix actualH(M, M * N); VecD vecd(N, points(0), 0, T); vecd(X, actualH); - Matrix expectedH = numericalDerivative11, M * N>( + Matrix expectedH = numericalDerivative11( std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-6)); } @@ -465,11 +465,11 @@ TEST(Chebyshev2, ComponentDerivativeFunctor) { using CompFunc = Chebyshev2::ComponentDerivativeFunctor; size_t row = 1; CompFunc fx(N, row, x, 0, 3); - ParameterMatrix X(N); + ParameterMatrix X(M, N); Matrix actualH(1, M * N); EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8); - Matrix expectedH = numericalDerivative11, M * N>( + Matrix expectedH = numericalDerivative11( std::bind(&CompFunc::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } diff --git a/gtsam/basis/tests/testFourier.cpp b/gtsam/basis/tests/testFourier.cpp index 3a147a9f6..c24d4b30a 100644 --- a/gtsam/basis/tests/testFourier.cpp +++ b/gtsam/basis/tests/testFourier.cpp @@ -190,7 +190,7 @@ TEST(Basis, VecDerivativeFunctor) { Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071) .finished() .transpose(); - ParameterMatrix<2> theta(theta_mat); + ParameterMatrix theta(theta_mat); EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4)); } diff --git a/gtsam/basis/tests/testParameterMatrix.cpp b/gtsam/basis/tests/testParameterMatrix.cpp index ae2e8e111..11a098172 100644 --- a/gtsam/basis/tests/testParameterMatrix.cpp +++ b/gtsam/basis/tests/testParameterMatrix.cpp @@ -32,19 +32,19 @@ const size_t M = 2, N = 5; //****************************************************************************** TEST(ParameterMatrix, Constructor) { - ParameterMatrix<2> actual1(3); - ParameterMatrix<2> expected1(Matrix::Zero(2, 3)); + ParameterMatrix actual1(2, 3); + ParameterMatrix expected1(Matrix::Zero(2, 3)); EXPECT(assert_equal(expected1, actual1)); - ParameterMatrix<2> actual2(Matrix::Ones(2, 3)); - ParameterMatrix<2> expected2(Matrix::Ones(2, 3)); + ParameterMatrix actual2(Matrix::Ones(2, 3)); + ParameterMatrix expected2(Matrix::Ones(2, 3)); EXPECT(assert_equal(expected2, actual2)); EXPECT(assert_equal(expected2.matrix(), actual2.matrix())); } 
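// --- Illustrative sketch, not taken from this patch: the ParameterMatrix interface
// --- exercised by the tests above, with both sizes supplied at run time.
#include <gtsam/basis/ParameterMatrix.h>
#include <iostream>

void parameterMatrixBasics() {
  gtsam::ParameterMatrix zeros(2, 5);                        // 2x5, zero-initialized
  gtsam::ParameterMatrix ones(gtsam::Matrix::Ones(2, 5));    // wrap an existing matrix
  gtsam::ParameterMatrix sum = zeros + ones;                 // vector-space addition
  gtsam::Matrix product = ones * gtsam::Matrix::Ones(5, 3);  // right-multiplication
  std::cout << sum << std::endl;                             // stream operator prints the matrix
}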
//****************************************************************************** TEST(ParameterMatrix, Dimensions) { - ParameterMatrix params(N); + ParameterMatrix params(M, N); EXPECT_LONGS_EQUAL(params.rows(), M); EXPECT_LONGS_EQUAL(params.cols(), N); EXPECT_LONGS_EQUAL(params.dim(), M * N); @@ -52,7 +52,7 @@ TEST(ParameterMatrix, Dimensions) { //****************************************************************************** TEST(ParameterMatrix, Getters) { - ParameterMatrix params(N); + ParameterMatrix params(M, N); Matrix expectedMatrix = Matrix::Zero(2, 5); EXPECT(assert_equal(expectedMatrix, params.matrix())); @@ -60,13 +60,13 @@ TEST(ParameterMatrix, Getters) { Matrix expectedMatrixTranspose = Matrix::Zero(5, 2); EXPECT(assert_equal(expectedMatrixTranspose, params.transpose())); - ParameterMatrix p2(Matrix::Ones(M, N)); + ParameterMatrix p2(Matrix::Ones(M, N)); Vector expectedRowVector = Vector::Ones(N); for (size_t r = 0; r < M; ++r) { EXPECT(assert_equal(p2.row(r), expectedRowVector)); } - ParameterMatrix p3(2 * Matrix::Ones(M, N)); + ParameterMatrix p3(2 * Matrix::Ones(M, N)); Vector expectedColVector = 2 * Vector::Ones(M); for (size_t c = 0; c < M; ++c) { EXPECT(assert_equal(p3.col(c), expectedColVector)); @@ -75,7 +75,7 @@ TEST(ParameterMatrix, Getters) { //****************************************************************************** TEST(ParameterMatrix, Setters) { - ParameterMatrix params(Matrix::Zero(M, N)); + ParameterMatrix params(Matrix::Zero(M, N)); Matrix expected = Matrix::Zero(M, N); // row @@ -97,31 +97,31 @@ TEST(ParameterMatrix, Setters) { //****************************************************************************** TEST(ParameterMatrix, Addition) { - ParameterMatrix params(Matrix::Ones(M, N)); - ParameterMatrix expected(2 * Matrix::Ones(M, N)); + ParameterMatrix params(Matrix::Ones(M, N)); + ParameterMatrix expected(2 * Matrix::Ones(M, N)); // Add vector EXPECT(assert_equal(expected, params + Vector::Ones(M * N))); // Add another ParameterMatrix - ParameterMatrix actual = params + ParameterMatrix(Matrix::Ones(M, N)); + ParameterMatrix actual = params + ParameterMatrix(Matrix::Ones(M, N)); EXPECT(assert_equal(expected, actual)); } //****************************************************************************** TEST(ParameterMatrix, Subtraction) { - ParameterMatrix params(2 * Matrix::Ones(M, N)); - ParameterMatrix expected(Matrix::Ones(M, N)); + ParameterMatrix params(2 * Matrix::Ones(M, N)); + ParameterMatrix expected(Matrix::Ones(M, N)); // Subtract vector EXPECT(assert_equal(expected, params - Vector::Ones(M * N))); // Subtract another ParameterMatrix - ParameterMatrix actual = params - ParameterMatrix(Matrix::Ones(M, N)); + ParameterMatrix actual = params - ParameterMatrix(Matrix::Ones(M, N)); EXPECT(assert_equal(expected, actual)); } //****************************************************************************** TEST(ParameterMatrix, Multiplication) { - ParameterMatrix params(Matrix::Ones(M, N)); + ParameterMatrix params(Matrix::Ones(M, N)); Matrix multiplier = 2 * Matrix::Ones(N, 2); Matrix expected = 2 * N * Matrix::Ones(M, 2); EXPECT(assert_equal(expected, params * multiplier)); @@ -129,12 +129,12 @@ TEST(ParameterMatrix, Multiplication) { //****************************************************************************** TEST(ParameterMatrix, VectorSpace) { - ParameterMatrix params(Matrix::Ones(M, N)); + ParameterMatrix params(Matrix::Ones(M, N)); // vector EXPECT(assert_equal(Vector::Ones(M * N), params.vector())); // identity - 
EXPECT(assert_equal(ParameterMatrix::Identity(), - ParameterMatrix(Matrix::Zero(M, 0)))); + EXPECT(assert_equal(ParameterMatrix::Identity(M), + ParameterMatrix(Matrix::Zero(M, 0)))); } //****************************************************************************** diff --git a/gtsam/nonlinear/values.i b/gtsam/nonlinear/values.i index 680cafc31..7ad6c947a 100644 --- a/gtsam/nonlinear/values.i +++ b/gtsam/nonlinear/values.i @@ -96,21 +96,7 @@ class Values { void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); void insert(size_t j, double c); - void insert(size_t j, const gtsam::ParameterMatrix<1>& X); - void insert(size_t j, const gtsam::ParameterMatrix<2>& X); - void insert(size_t j, const gtsam::ParameterMatrix<3>& X); - void insert(size_t j, const gtsam::ParameterMatrix<4>& X); - void insert(size_t j, const gtsam::ParameterMatrix<5>& X); - void insert(size_t j, const gtsam::ParameterMatrix<6>& X); - void insert(size_t j, const gtsam::ParameterMatrix<7>& X); - void insert(size_t j, const gtsam::ParameterMatrix<8>& X); - void insert(size_t j, const gtsam::ParameterMatrix<9>& X); - void insert(size_t j, const gtsam::ParameterMatrix<10>& X); - void insert(size_t j, const gtsam::ParameterMatrix<11>& X); - void insert(size_t j, const gtsam::ParameterMatrix<12>& X); - void insert(size_t j, const gtsam::ParameterMatrix<13>& X); - void insert(size_t j, const gtsam::ParameterMatrix<14>& X); - void insert(size_t j, const gtsam::ParameterMatrix<15>& X); + void insert(size_t j, const gtsam::ParameterMatrix& X); template void insert(size_t j, const T& val); @@ -144,21 +130,7 @@ class Values { void update(size_t j, Vector vector); void update(size_t j, Matrix matrix); void update(size_t j, double c); - void update(size_t j, const gtsam::ParameterMatrix<1>& X); - void update(size_t j, const gtsam::ParameterMatrix<2>& X); - void update(size_t j, const gtsam::ParameterMatrix<3>& X); - void update(size_t j, const gtsam::ParameterMatrix<4>& X); - void update(size_t j, const gtsam::ParameterMatrix<5>& X); - void update(size_t j, const gtsam::ParameterMatrix<6>& X); - void update(size_t j, const gtsam::ParameterMatrix<7>& X); - void update(size_t j, const gtsam::ParameterMatrix<8>& X); - void update(size_t j, const gtsam::ParameterMatrix<9>& X); - void update(size_t j, const gtsam::ParameterMatrix<10>& X); - void update(size_t j, const gtsam::ParameterMatrix<11>& X); - void update(size_t j, const gtsam::ParameterMatrix<12>& X); - void update(size_t j, const gtsam::ParameterMatrix<13>& X); - void update(size_t j, const gtsam::ParameterMatrix<14>& X); - void update(size_t j, const gtsam::ParameterMatrix<15>& X); + void update(size_t j, const gtsam::ParameterMatrix& X); void insert_or_assign(size_t j, const gtsam::Point2& point2); void insert_or_assign(size_t j, const gtsam::Point3& point3); @@ -199,21 +171,7 @@ class Values { void insert_or_assign(size_t j, Vector vector); void insert_or_assign(size_t j, Matrix matrix); void insert_or_assign(size_t j, double c); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<1>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<2>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<3>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<4>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<5>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<6>& X); - void insert_or_assign(size_t j, const 
gtsam::ParameterMatrix<7>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<8>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<9>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<10>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<11>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<12>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<13>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<14>& X); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix<15>& X); + void insert_or_assign(size_t j, const gtsam::ParameterMatrix& X); template , - gtsam::ParameterMatrix<2>, - gtsam::ParameterMatrix<3>, - gtsam::ParameterMatrix<4>, - gtsam::ParameterMatrix<5>, - gtsam::ParameterMatrix<6>, - gtsam::ParameterMatrix<7>, - gtsam::ParameterMatrix<8>, - gtsam::ParameterMatrix<9>, - gtsam::ParameterMatrix<10>, - gtsam::ParameterMatrix<11>, - gtsam::ParameterMatrix<12>, - gtsam::ParameterMatrix<13>, - gtsam::ParameterMatrix<14>, - gtsam::ParameterMatrix<15>}> + gtsam::ParameterMatrix}> T at(size_t j); }; From 73c950e69a716c0042622388a48a6f02fb31c767 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 20 Jun 2023 08:59:01 -0400 Subject: [PATCH 61/78] remove templating and make all Basis code dynamic --- gtsam/basis/Basis.cpp | 33 ++++++++ gtsam/basis/Basis.h | 102 ++++++++++++------------- gtsam/basis/BasisFactors.h | 82 ++++++++++---------- gtsam/basis/tests/testBasisFactors.cpp | 14 ++-- gtsam/basis/tests/testChebyshev2.cpp | 18 ++--- gtsam/basis/tests/testFourier.cpp | 4 +- 6 files changed, 141 insertions(+), 112 deletions(-) create mode 100644 gtsam/basis/Basis.cpp diff --git a/gtsam/basis/Basis.cpp b/gtsam/basis/Basis.cpp new file mode 100644 index 000000000..3e684b197 --- /dev/null +++ b/gtsam/basis/Basis.cpp @@ -0,0 +1,33 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Basis.cpp + * @brief Compute an interpolating basis + * @author Varun Agrawal + * @date June 20, 2023 + */ + +#include + +namespace gtsam { + +Matrix kroneckerProductIdentity(size_t M, const Weights& w) { + Matrix result(M, w.cols() * M); + result.setZero(); + + for (int i = 0; i < w.cols(); i++) { + result.block(0, i * M, M, M).diagonal().array() = w(i); + } + return result; +} + +} // namespace gtsam diff --git a/gtsam/basis/Basis.h b/gtsam/basis/Basis.h index 2963b99a8..ddc8f4ddd 100644 --- a/gtsam/basis/Basis.h +++ b/gtsam/basis/Basis.h @@ -81,16 +81,7 @@ using Weights = Eigen::Matrix; /* 1xN vector */ * * @ingroup basis */ -template -Matrix kroneckerProductIdentity(const Weights& w) { - Matrix result(M, w.cols() * M); - result.setZero(); - - for (int i = 0; i < w.cols(); i++) { - result.block(0, i * M, M, M).diagonal().array() = w(i); - } - return result; -} +Matrix kroneckerProductIdentity(size_t M, const Weights& w); /** * CRTP Base class for function bases @@ -174,13 +165,13 @@ class Basis { * value x. When given a specific M*N parameters, returns an M-vector the M * corresponding functions at x, possibly with Jacobians wrpt the parameters. 
*/ - template class VectorEvaluationFunctor : protected EvaluationFunctor { protected: - using VectorM = Eigen::Matrix; - using Jacobian = Eigen::Matrix; + using Jacobian = Eigen::Matrix; Jacobian H_; + size_t M_; + /** * Calculate the `M*(M*N)` Jacobian of this functor with respect to * the M*N parameter matrix `P`. @@ -190,7 +181,7 @@ class Basis { * i.e., the Kronecker product of weights_ with the MxM identity matrix. */ void calculateJacobian() { - H_ = kroneckerProductIdentity(this->weights_); + H_ = kroneckerProductIdentity(M_, this->weights_); } public: @@ -200,26 +191,27 @@ class Basis { VectorEvaluationFunctor() {} /// Default Constructor - VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) { + VectorEvaluationFunctor(size_t M, size_t N, double x) + : EvaluationFunctor(N, x), M_(M) { calculateJacobian(); } /// Constructor, with interval [a,b] - VectorEvaluationFunctor(size_t N, double x, double a, double b) - : EvaluationFunctor(N, x, a, b) { + VectorEvaluationFunctor(size_t M, size_t N, double x, double a, double b) + : EvaluationFunctor(N, x, a, b), M_(M) { calculateJacobian(); } /// M-dimensional evaluation - VectorM apply(const ParameterMatrix& P, - OptionalJacobian H = {}) const { + Vector apply(const ParameterMatrix& P, + OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - VectorM operator()(const ParameterMatrix& P, - OptionalJacobian H = {}) const { + Vector operator()(const ParameterMatrix& P, + OptionalJacobian H = {}) const { return apply(P, H); } }; @@ -231,13 +223,14 @@ class Basis { * * This component is specified by the row index i, with 0 class VectorComponentFunctor : public EvaluationFunctor { protected: using Jacobian = Eigen::Matrix; - size_t rowIndex_; Jacobian H_; + size_t M_; + size_t rowIndex_; + /* * Calculate the `1*(M*N)` Jacobian of this functor with respect to * the M*N parameter matrix `P`. @@ -248,9 +241,9 @@ class Basis { * MxM identity matrix. See also VectorEvaluationFunctor. */ void calculateJacobian(size_t N) { - H_.setZero(1, M * N); + H_.setZero(1, M_ * N); for (int j = 0; j < EvaluationFunctor::weights_.size(); j++) - H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j); + H_(0, rowIndex_ + j * M_) = EvaluationFunctor::weights_(j); } public: @@ -258,14 +251,15 @@ class Basis { VectorComponentFunctor() {} /// Construct with row index - VectorComponentFunctor(size_t N, size_t i, double x) - : EvaluationFunctor(N, x), rowIndex_(i) { + VectorComponentFunctor(size_t M, size_t N, size_t i, double x) + : EvaluationFunctor(N, x), M_(M), rowIndex_(i) { calculateJacobian(N); } /// Construct with row index and interval - VectorComponentFunctor(size_t N, size_t i, double x, double a, double b) - : EvaluationFunctor(N, x, a, b), rowIndex_(i) { + VectorComponentFunctor(size_t M, size_t N, size_t i, double x, double a, + double b) + : EvaluationFunctor(N, x, a, b), M_(M), rowIndex_(i) { calculateJacobian(N); } @@ -297,21 +291,20 @@ class Basis { * 3D rotation. 
*/ template - class ManifoldEvaluationFunctor - : public VectorEvaluationFunctor::dimension> { + class ManifoldEvaluationFunctor : public VectorEvaluationFunctor { enum { M = traits::dimension }; - using Base = VectorEvaluationFunctor; + using Base = VectorEvaluationFunctor; public: /// For serialization ManifoldEvaluationFunctor() {} /// Default Constructor - ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {} + ManifoldEvaluationFunctor(size_t N, double x) : Base(M, N, x) {} /// Constructor, with interval [a,b] ManifoldEvaluationFunctor(size_t N, double x, double a, double b) - : Base(N, x, a, b) {} + : Base(M, N, x, a, b) {} /// Manifold evaluation T apply(const ParameterMatrix& P, @@ -396,13 +389,13 @@ class Basis { * returns an M-vector the M corresponding function derivatives at x, possibly * with Jacobians wrpt the parameters. */ - template class VectorDerivativeFunctor : protected DerivativeFunctorBase { protected: - using VectorM = Eigen::Matrix; - using Jacobian = Eigen::Matrix; + using Jacobian = Eigen::Matrix; Jacobian H_; + size_t M_; + /** * Calculate the `M*(M*N)` Jacobian of this functor with respect to * the M*N parameter matrix `P`. @@ -412,7 +405,7 @@ class Basis { * i.e., the Kronecker product of weights_ with the MxM identity matrix. */ void calculateJacobian() { - H_ = kroneckerProductIdentity(this->weights_); + H_ = kroneckerProductIdentity(M_, this->weights_); } public: @@ -422,24 +415,25 @@ class Basis { VectorDerivativeFunctor() {} /// Default Constructor - VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) { + VectorDerivativeFunctor(size_t M, size_t N, double x) + : DerivativeFunctorBase(N, x), M_(M) { calculateJacobian(); } /// Constructor, with optional interval [a,b] - VectorDerivativeFunctor(size_t N, double x, double a, double b) - : DerivativeFunctorBase(N, x, a, b) { + VectorDerivativeFunctor(size_t M, size_t N, double x, double a, double b) + : DerivativeFunctorBase(N, x, a, b), M_(M) { calculateJacobian(); } - VectorM apply(const ParameterMatrix& P, - OptionalJacobian H = {}) const { + Vector apply(const ParameterMatrix& P, + OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - VectorM operator()(const ParameterMatrix& P, - OptionalJacobian H = {}) const { + Vector operator()(const ParameterMatrix& P, + OptionalJacobian H = {}) const { return apply(P, H); } }; @@ -451,13 +445,14 @@ class Basis { * * This component is specified by the row index i, with 0 class ComponentDerivativeFunctor : protected DerivativeFunctorBase { protected: using Jacobian = Eigen::Matrix; - size_t rowIndex_; Jacobian H_; + size_t M_; + size_t rowIndex_; + /* * Calculate the `1*(M*N)` Jacobian of this functor with respect to * the M*N parameter matrix `P`. @@ -468,9 +463,9 @@ class Basis { * MxM identity matrix. See also VectorDerivativeFunctor. 
*/ void calculateJacobian(size_t N) { - H_.setZero(1, M * N); + H_.setZero(1, M_ * N); for (int j = 0; j < this->weights_.size(); j++) - H_(0, rowIndex_ + j * M) = this->weights_(j); + H_(0, rowIndex_ + j * M_) = this->weights_(j); } public: @@ -478,14 +473,15 @@ class Basis { ComponentDerivativeFunctor() {} /// Construct with row index - ComponentDerivativeFunctor(size_t N, size_t i, double x) - : DerivativeFunctorBase(N, x), rowIndex_(i) { + ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x) + : DerivativeFunctorBase(N, x), M_(M), rowIndex_(i) { calculateJacobian(N); } /// Construct with row index and interval - ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b) - : DerivativeFunctorBase(N, x, a, b), rowIndex_(i) { + ComponentDerivativeFunctor(size_t M, size_t N, size_t i, double x, double a, + double b) + : DerivativeFunctorBase(N, x, a, b), M_(M), rowIndex_(i) { calculateJacobian(N); } /// Calculate derivative of component rowIndex_ of F diff --git a/gtsam/basis/BasisFactors.h b/gtsam/basis/BasisFactors.h index 496d7094c..2ce0faae7 100644 --- a/gtsam/basis/BasisFactors.h +++ b/gtsam/basis/BasisFactors.h @@ -87,11 +87,10 @@ class EvaluationFactor : public FunctorizedFactor { * measurement prediction function. * * @param BASIS: The basis class to use e.g. Chebyshev2 - * @param M: Size of the evaluated state vector. * * @ingroup basis */ -template +template class VectorEvaluationFactor : public FunctorizedFactor { private: @@ -107,14 +106,14 @@ class VectorEvaluationFactor * polynomial. * @param z The measurement value. * @param model The noise model. + * @param M Size of the evaluated state vector. * @param N The degree of the polynomial. * @param x The point at which to evaluate the basis polynomial. */ VectorEvaluationFactor(Key key, const Vector &z, - const SharedNoiseModel &model, const size_t N, - double x) - : Base(key, z, model, - typename BASIS::template VectorEvaluationFunctor(N, x)) {} + const SharedNoiseModel &model, const size_t M, + const size_t N, double x) + : Base(key, z, model, typename BASIS::VectorEvaluationFunctor(M, N, x)) {} /** * @brief Construct a new VectorEvaluationFactor object. @@ -123,16 +122,17 @@ class VectorEvaluationFactor * polynomial. * @param z The measurement value. * @param model The noise model. + * @param M Size of the evaluated state vector. * @param N The degree of the polynomial. * @param x The point at which to evaluate the basis polynomial. * @param a Lower bound for the polynomial. * @param b Upper bound for the polynomial. */ VectorEvaluationFactor(Key key, const Vector &z, - const SharedNoiseModel &model, const size_t N, - double x, double a, double b) + const SharedNoiseModel &model, const size_t M, + const size_t N, double x, double a, double b) : Base(key, z, model, - typename BASIS::template VectorEvaluationFunctor(N, x, a, b)) {} + typename BASIS::VectorEvaluationFunctor(M, N, x, a, b)) {} virtual ~VectorEvaluationFactor() {} }; @@ -147,17 +147,15 @@ class VectorEvaluationFactor * indexed by `i`. * * @param BASIS: The basis class to use e.g. Chebyshev2 - * @param P: Size of the fixed-size vector. * * Example: - * VectorComponentFactor controlPrior(key, measured, model, - * N, i, t, a, b); + * VectorComponentFactor controlPrior(key, measured, model, + * N, i, t, a, b); * where N is the degree and i is the component index. 
* * @ingroup basis */ -// TODO(Varun) remove template P -template +template class VectorComponentFactor : public FunctorizedFactor { private: @@ -174,15 +172,16 @@ class VectorComponentFactor * @param z The scalar value at a specified index `i` of the full measurement * vector. * @param model The noise model. + * @param P Size of the fixed-size vector. * @param N The degree of the polynomial. * @param i The index for the evaluated vector to give us the desired scalar * value. * @param x The point at which to evaluate the basis polynomial. */ VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model, - const size_t N, size_t i, double x) + const size_t P, const size_t N, size_t i, double x) : Base(key, z, model, - typename BASIS::template VectorComponentFunctor
<P>
(N, i, x)) {} + typename BASIS::VectorComponentFunctor(P, N, i, x)) {} /** * @brief Construct a new VectorComponentFactor object. @@ -192,6 +191,7 @@ class VectorComponentFactor * @param z The scalar value at a specified index `i` of the full measurement * vector. * @param model The noise model. + * @param P Size of the fixed-size vector. * @param N The degree of the polynomial. * @param i The index for the evaluated vector to give us the desired scalar * value. @@ -200,11 +200,10 @@ class VectorComponentFactor * @param b Upper bound for the polynomial. */ VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model, - const size_t N, size_t i, double x, double a, double b) - : Base( - key, z, model, - typename BASIS::template VectorComponentFunctor
<P>
(N, i, x, a, b)) { - } + const size_t P, const size_t N, size_t i, double x, + double a, double b) + : Base(key, z, model, + typename BASIS::VectorComponentFunctor(P, N, i, x, a, b)) {} virtual ~VectorComponentFactor() {} }; @@ -324,15 +323,13 @@ class DerivativeFactor * polynomial at a specified point `x` is equal to the vector value `z`. * * @param BASIS: The basis class to use e.g. Chebyshev2 - * @param M: Size of the evaluated state vector derivative. */ -//TODO(Varun) remove template M -template +template class VectorDerivativeFactor : public FunctorizedFactor { private: using Base = FunctorizedFactor; - using Func = typename BASIS::template VectorDerivativeFunctor; + using Func = typename BASIS::VectorDerivativeFunctor; public: VectorDerivativeFactor() {} @@ -344,13 +341,14 @@ class VectorDerivativeFactor * polynomial. * @param z The measurement value. * @param model The noise model. + * @param M Size of the evaluated state vector derivative. * @param N The degree of the polynomial. * @param x The point at which to evaluate the basis polynomial. */ VectorDerivativeFactor(Key key, const Vector &z, - const SharedNoiseModel &model, const size_t N, - double x) - : Base(key, z, model, Func(N, x)) {} + const SharedNoiseModel &model, const size_t M, + const size_t N, double x) + : Base(key, z, model, Func(M, N, x)) {} /** * @brief Construct a new VectorDerivativeFactor object. @@ -359,15 +357,16 @@ class VectorDerivativeFactor * polynomial. * @param z The measurement value. * @param model The noise model. + * @param M Size of the evaluated state vector derivative. * @param N The degree of the polynomial. * @param x The point at which to evaluate the basis polynomial. * @param a Lower bound for the polynomial. * @param b Upper bound for the polynomial. */ VectorDerivativeFactor(Key key, const Vector &z, - const SharedNoiseModel &model, const size_t N, - double x, double a, double b) - : Base(key, z, model, Func(N, x, a, b)) {} + const SharedNoiseModel &model, const size_t M, + const size_t N, double x, double a, double b) + : Base(key, z, model, Func(M, N, x, a, b)) {} virtual ~VectorDerivativeFactor() {} }; @@ -378,15 +377,13 @@ class VectorDerivativeFactor * vector-valued measurement `z`. * * @param BASIS: The basis class to use e.g. Chebyshev2 - * @param P: Size of the control component derivative. */ -// TODO(Varun) remove template P -template +template class ComponentDerivativeFactor : public FunctorizedFactor { private: using Base = FunctorizedFactor; - using Func = typename BASIS::template ComponentDerivativeFunctor
<P>
; + using Func = typename BASIS::ComponentDerivativeFunctor; public: ComponentDerivativeFactor() {} @@ -399,15 +396,16 @@ class ComponentDerivativeFactor * @param z The scalar measurement value at a specific index `i` of the full * measurement vector. * @param model The degree of the polynomial. + * @param P: Size of the control component derivative. * @param N The degree of the polynomial. * @param i The index for the evaluated vector to give us the desired scalar * value. * @param x The point at which to evaluate the basis polynomial. */ ComponentDerivativeFactor(Key key, const double &z, - const SharedNoiseModel &model, const size_t N, - size_t i, double x) - : Base(key, z, model, Func(N, i, x)) {} + const SharedNoiseModel &model, const size_t P, + const size_t N, size_t i, double x) + : Base(key, z, model, Func(P, N, i, x)) {} /** * @brief Construct a new ComponentDerivativeFactor object. @@ -417,6 +415,7 @@ class ComponentDerivativeFactor * @param z The scalar measurement value at a specific index `i` of the full * measurement vector. * @param model The degree of the polynomial. + * @param P: Size of the control component derivative. * @param N The degree of the polynomial. * @param i The index for the evaluated vector to give us the desired scalar * value. @@ -425,9 +424,10 @@ class ComponentDerivativeFactor * @param b Upper bound for the polynomial. */ ComponentDerivativeFactor(Key key, const double &z, - const SharedNoiseModel &model, const size_t N, - size_t i, double x, double a, double b) - : Base(key, z, model, Func(N, i, x, a, b)) {} + const SharedNoiseModel &model, const size_t P, + const size_t N, size_t i, double x, double a, + double b) + : Base(key, z, model, Func(P, N, i, x, a, b)) {} virtual ~ComponentDerivativeFactor() {} }; diff --git a/gtsam/basis/tests/testBasisFactors.cpp b/gtsam/basis/tests/testBasisFactors.cpp index c2552ac11..2f936ddba 100644 --- a/gtsam/basis/tests/testBasisFactors.cpp +++ b/gtsam/basis/tests/testBasisFactors.cpp @@ -80,7 +80,7 @@ TEST(BasisFactors, VectorEvaluationFactor) { const Vector measured = Vector::Zero(M); auto model = Isotropic::Sigma(M, 1.0); - VectorEvaluationFactor factor(key, measured, model, N, 0); + VectorEvaluationFactor factor(key, measured, model, M, N, 0); NonlinearFactorGraph graph; graph.add(factor); @@ -106,7 +106,7 @@ TEST(BasisFactors, Print) { const Vector measured = Vector::Ones(M) * 42; auto model = Isotropic::Sigma(M, 1.0); - VectorEvaluationFactor factor(key, measured, model, N, 0); + VectorEvaluationFactor factor(key, measured, model, M, N, 0); std::string expected = " keys = { X0 }\n" @@ -127,8 +127,8 @@ TEST(BasisFactors, VectorComponentFactor) { const size_t i = 2; const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0; auto model = Isotropic::Sigma(1, 1.0); - VectorComponentFactor factor(key, measured, model, N, i, t, a, - b); + VectorComponentFactor factor(key, measured, model, P, N, i, t, a, + b); NonlinearFactorGraph graph; graph.add(factor); @@ -180,7 +180,7 @@ TEST(BasisFactors, VecDerivativePrior) { const Vector measured = Vector::Zero(M); auto model = Isotropic::Sigma(M, 1.0); - VectorDerivativeFactor vecDPrior(key, measured, model, N, 0); + VectorDerivativeFactor vecDPrior(key, measured, model, M, N, 0); NonlinearFactorGraph graph; graph.add(vecDPrior); @@ -205,8 +205,8 @@ TEST(BasisFactors, ComponentDerivativeFactor) { double measured = 0; auto model = Isotropic::Sigma(1, 1.0); - ComponentDerivativeFactor controlDPrior(key, measured, model, - N, 0, 0); + ComponentDerivativeFactor controlDPrior(key, 
measured, model, M, + N, 0, 0); NonlinearFactorGraph graph; graph.add(controlDPrior); diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index abf98e8e4..8aa326ab1 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -116,12 +116,12 @@ TEST(Chebyshev2, InterpolateVector) { expected << t, 0; Eigen::Matrix actualH(2, 2 * N); - Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b); + Chebyshev2::VectorEvaluationFunctor fx(2, N, t, a, b); EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative std::function f = - std::bind(&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, + std::bind(&Chebyshev2::VectorEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = numericalDerivative11(f, X); @@ -413,8 +413,8 @@ TEST(Chebyshev2, Derivative6_03) { TEST(Chebyshev2, VectorDerivativeFunctor) { const size_t N = 3, M = 2; const double x = 0.2; - using VecD = Chebyshev2::VectorDerivativeFunctor; - VecD fx(N, x, 0, 3); + using VecD = Chebyshev2::VectorDerivativeFunctor; + VecD fx(M, N, x, 0, 3); ParameterMatrix X(M, N); Matrix actualH(M, M * N); EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8)); @@ -429,7 +429,7 @@ TEST(Chebyshev2, VectorDerivativeFunctor) { // Test VectorDerivativeFunctor with polynomial function TEST(Chebyshev2, VectorDerivativeFunctor2) { const size_t N = 64, M = 1, T = 15; - using VecD = Chebyshev2::VectorDerivativeFunctor; + using VecD = Chebyshev2::VectorDerivativeFunctor; const Vector points = Chebyshev2::Points(N, 0, T); @@ -443,14 +443,14 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { // Evaluate the derivative at the chebyshev points using // VectorDerivativeFunctor. for (size_t i = 0; i < N; ++i) { - VecD d(N, points(i), 0, T); + VecD d(M, N, points(i), 0, T); Vector1 Dx = d(X); EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6); } // Test Jacobian at the first chebyshev point. 
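// --- Illustrative sketch, not taken from this patch: with this change the vector
// --- functors take the output dimension M at run time, e.g.
//     const size_t M = 1, N = 64;
//     Chebyshev2::VectorDerivativeFunctor dot(M, N, /*x=*/0.5, /*a=*/0.0, /*b=*/15.0);
//     ParameterMatrix X(M, N);   // coefficients of the fitted function
//     Vector dxdt = dot(X);      // M-vector of derivatives at x = 0.5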
Matrix actualH(M, M * N); - VecD vecd(N, points(0), 0, T); + VecD vecd(M, N, points(0), 0, T); vecd(X, actualH); Matrix expectedH = numericalDerivative11( std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X); @@ -462,9 +462,9 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { TEST(Chebyshev2, ComponentDerivativeFunctor) { const size_t N = 6, M = 2; const double x = 0.2; - using CompFunc = Chebyshev2::ComponentDerivativeFunctor; + using CompFunc = Chebyshev2::ComponentDerivativeFunctor; size_t row = 1; - CompFunc fx(N, row, x, 0, 3); + CompFunc fx(M, N, row, x, 0, 3); ParameterMatrix X(M, N); Matrix actualH(1, M * N); EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8); diff --git a/gtsam/basis/tests/testFourier.cpp b/gtsam/basis/tests/testFourier.cpp index c24d4b30a..0060dc317 100644 --- a/gtsam/basis/tests/testFourier.cpp +++ b/gtsam/basis/tests/testFourier.cpp @@ -180,13 +180,13 @@ TEST(Basis, Derivative7) { //****************************************************************************** TEST(Basis, VecDerivativeFunctor) { - using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>; + using DotShape = typename FourierBasis::VectorDerivativeFunctor; const size_t N = 3; // MATLAB example, Dec 27 2019, commit 014eded5 double h = 2 * M_PI / 16; Vector2 dotShape(0.5556, -0.8315); // at h/2 - DotShape dotShapeFunction(N, h / 2); + DotShape dotShapeFunction(2, N, h / 2); Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071) .finished() .transpose(); From 445ffb3110cf68de553bbea8b5e0fe10cf824d8f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 20 Jun 2023 09:07:52 -0400 Subject: [PATCH 62/78] update wrapped code --- gtsam/basis/basis.i | 71 +++++++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 28 deletions(-) diff --git a/gtsam/basis/basis.i b/gtsam/basis/basis.i index 61ba1eada..ce91f7782 100644 --- a/gtsam/basis/basis.i +++ b/gtsam/basis/basis.i @@ -71,44 +71,28 @@ virtual class EvaluationFactor : gtsam::NoiseModelFactor { double x, double a, double b); }; -template +template virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor { VectorEvaluationFactor(); VectorEvaluationFactor(gtsam::Key key, const Vector& z, - const gtsam::noiseModel::Base* model, const size_t N, - double x); + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, double x); VectorEvaluationFactor(gtsam::Key key, const Vector& z, - const gtsam::noiseModel::Base* model, const size_t N, - double x, double a, double b); + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, double x, double a, double b); }; -// TODO(Varun) Better way to support arbitrary dimensions? -// Especially if users mainly do `pip install gtsam` for the Python wrapper. 
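// --- Illustrative sketch, not taken from this patch: constructing the re-worked
// --- factor with the vector dimension M passed at run time, matching the C++
// --- signature changed earlier in this series. Key, measurement and noise values
// --- are made up.
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

void addVectorEvaluationPrior(gtsam::NonlinearFactorGraph& graph) {
  const size_t M = 3, N = 8;  // 3-vector valued fit over 8 basis points
  auto model = gtsam::noiseModel::Isotropic::Sigma(M, 0.1);
  const gtsam::Vector z = gtsam::Vector::Zero(M);
  graph.emplace_shared<gtsam::VectorEvaluationFactor<gtsam::Chebyshev2>>(
      gtsam::Symbol('x', 0), z, model, M, N, /*x=*/0.25, /*a=*/0.0, /*b=*/1.0);
}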
-typedef gtsam::VectorEvaluationFactor - VectorEvaluationFactorChebyshev2D3; -typedef gtsam::VectorEvaluationFactor - VectorEvaluationFactorChebyshev2D4; -typedef gtsam::VectorEvaluationFactor - VectorEvaluationFactorChebyshev2D12; - -template +template virtual class VectorComponentFactor : gtsam::NoiseModelFactor { VectorComponentFactor(); VectorComponentFactor(gtsam::Key key, const double z, - const gtsam::noiseModel::Base* model, const size_t N, - size_t i, double x); + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, size_t i, double x); VectorComponentFactor(gtsam::Key key, const double z, - const gtsam::noiseModel::Base* model, const size_t N, - size_t i, double x, double a, double b); + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, size_t i, double x, double a, double b); }; -typedef gtsam::VectorComponentFactor - VectorComponentFactorChebyshev2D3; -typedef gtsam::VectorComponentFactor - VectorComponentFactorChebyshev2D4; -typedef gtsam::VectorComponentFactor - VectorComponentFactorChebyshev2D12; - template virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor { ManifoldEvaluationFactor(); @@ -127,8 +111,39 @@ typedef gtsam::ManifoldEvaluationFactor typedef gtsam::ManifoldEvaluationFactor ManifoldEvaluationFactorChebyshev2Pose3; -// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and -// `ComponentDerivativeFactor` +template +virtual class DerivativeFactor : gtsam::NoiseModelFactor { + DerivativeFactor(); + DerivativeFactor(gtsam::Key key, const double z, + const gtsam::noiseModel::Base* model, const size_t N, + double x); + DerivativeFactor(gtsam::Key key, const double z, + const gtsam::noiseModel::Base* model, const size_t N, + double x, double a, double b); +}; + +template +virtual class VectorDerivativeFactor : gtsam::NoiseModelFactor { + VectorDerivativeFactor(); + VectorDerivativeFactor(gtsam::Key key, const Vector& z, + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, double x); + VectorDerivativeFactor(gtsam::Key key, const Vector& z, + const gtsam::noiseModel::Base* model, const size_t M, + const size_t N, double x, double a, double b); +}; + +template +virtual class ComponentDerivativeFactor : gtsam::NoiseModelFactor { + ComponentDerivativeFactor(); + ComponentDerivativeFactor(gtsam::Key key, const double z, + const gtsam::noiseModel::Base* model, + const size_t P, const size_t N, size_t i, double x); + ComponentDerivativeFactor(gtsam::Key key, const double z, + const gtsam::noiseModel::Base* model, + const size_t P, const size_t N, size_t i, double x, + double a, double b); +}; #include template Date: Tue, 20 Jun 2023 09:14:12 -0400 Subject: [PATCH 63/78] template on multiple bases --- gtsam/basis/basis.i | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/gtsam/basis/basis.i b/gtsam/basis/basis.i index ce91f7782..8cbe4593d 100644 --- a/gtsam/basis/basis.i +++ b/gtsam/basis/basis.i @@ -71,7 +71,8 @@ virtual class EvaluationFactor : gtsam::NoiseModelFactor { double x, double a, double b); }; -template +template virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor { VectorEvaluationFactor(); VectorEvaluationFactor(gtsam::Key key, const Vector& z, @@ -82,7 +83,8 @@ virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor { const size_t N, double x, double a, double b); }; -template +template virtual class VectorComponentFactor : gtsam::NoiseModelFactor { VectorComponentFactor(); VectorComponentFactor(gtsam::Key key, 
const double z, @@ -93,7 +95,12 @@ virtual class VectorComponentFactor : gtsam::NoiseModelFactor { const size_t N, size_t i, double x, double a, double b); }; -template +#include +#include + +template virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor { ManifoldEvaluationFactor(); ManifoldEvaluationFactor(gtsam::Key key, const T& z, @@ -104,14 +111,8 @@ virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor { double x, double a, double b); }; -#include - -typedef gtsam::ManifoldEvaluationFactor - ManifoldEvaluationFactorChebyshev2Rot3; -typedef gtsam::ManifoldEvaluationFactor - ManifoldEvaluationFactorChebyshev2Pose3; - -template +template virtual class DerivativeFactor : gtsam::NoiseModelFactor { DerivativeFactor(); DerivativeFactor(gtsam::Key key, const double z, @@ -122,7 +123,8 @@ virtual class DerivativeFactor : gtsam::NoiseModelFactor { double x, double a, double b); }; -template +template virtual class VectorDerivativeFactor : gtsam::NoiseModelFactor { VectorDerivativeFactor(); VectorDerivativeFactor(gtsam::Key key, const Vector& z, @@ -133,7 +135,8 @@ virtual class VectorDerivativeFactor : gtsam::NoiseModelFactor { const size_t N, double x, double a, double b); }; -template +template virtual class ComponentDerivativeFactor : gtsam::NoiseModelFactor { ComponentDerivativeFactor(); ComponentDerivativeFactor(gtsam::Key key, const double z, From 55ce145bf7e60169e80ee96228bc6e01a2eafe9a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 20 Jun 2023 10:05:52 -0400 Subject: [PATCH 64/78] undo changes to tests to ensure backwards compatibility --- python/gtsam/tests/test_Cal3Fisheye.py | 18 ++++++++------- python/gtsam/tests/test_Cal3Unified.py | 16 +++++++------ python/gtsam/tests/test_DsfTrackGenerator.py | 6 ++--- .../tests/test_FixedLagSmootherExample.py | 6 ++--- .../gtsam/tests/test_GaussianFactorGraph.py | 5 ++-- python/gtsam/tests/test_KarcherMeanFactor.py | 23 ++++++++++++------- python/gtsam/tests/test_Pose2.py | 5 ++-- python/gtsam/tests/test_Pose3.py | 7 +++--- python/gtsam/tests/test_ShonanAveraging.py | 7 +++--- python/gtsam/tests/test_Sim2.py | 9 ++++---- python/gtsam/tests/test_Sim3.py | 13 ++++++----- python/gtsam/tests/test_Triangulation.py | 19 ++++++++------- 12 files changed, 77 insertions(+), 57 deletions(-) diff --git a/python/gtsam/tests/test_Cal3Fisheye.py b/python/gtsam/tests/test_Cal3Fisheye.py index 246ec19ee..3e4c58ccb 100644 --- a/python/gtsam/tests/test_Cal3Fisheye.py +++ b/python/gtsam/tests/test_Cal3Fisheye.py @@ -11,11 +11,12 @@ Refactored: Roderick Koehle """ import unittest -import gtsam import numpy as np from gtsam.symbol_shorthand import K, L, P from gtsam.utils.test_case import GtsamTestCase +import gtsam + def ulp(ftype=np.float64): """ @@ -26,7 +27,7 @@ def ulp(ftype=np.float64): class TestCal3Fisheye(GtsamTestCase): - + @classmethod def setUpClass(cls): """ @@ -50,10 +51,11 @@ class TestCal3Fisheye(GtsamTestCase): camera1 = gtsam.PinholeCameraCal3Fisheye(pose1) camera2 = gtsam.PinholeCameraCal3Fisheye(pose2) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = [pose1, pose2] - cls.cameras = [camera1, camera2] - cls.measurements = [k.project(cls.origin) for k in cls.cameras] - + cls.poses = gtsam.Pose3Vector([pose1, pose2]) + cls.cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2]) + cls.measurements = gtsam.Point2Vector( + [k.project(cls.origin) for k in cls.cameras]) + def test_Cal3Fisheye(self): K = gtsam.Cal3Fisheye() self.assertEqual(K.fx(), 1.) 
@@ -62,7 +64,7 @@ class TestCal3Fisheye(GtsamTestCase): def test_distortion(self): """Fisheye distortion and rectification""" equidistant = gtsam.Cal3Fisheye() - perspective_pt = self.obj_point[0:2]/self.obj_point[2] + perspective_pt = self.obj_point[0:2] / self.obj_point[2] distorted_pt = equidistant.uncalibrate(perspective_pt) rectified_pt = equidistant.calibrate(distorted_pt) self.gtsamAssertEquals(distorted_pt, self.img_point) @@ -166,7 +168,7 @@ class TestCal3Fisheye(GtsamTestCase): pose = gtsam.Pose3() camera = gtsam.Cal3Fisheye() state = gtsam.Values() - camera_key, pose_key, landmark_key = K(0), P(0), L(0) + pose_key, landmark_key = P(0), L(0) state.insert_point3(landmark_key, obj_point) state.insert_pose3(pose_key, pose) g = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py index 32e7cea9d..d0cb1a683 100644 --- a/python/gtsam/tests/test_Cal3Unified.py +++ b/python/gtsam/tests/test_Cal3Unified.py @@ -10,11 +10,12 @@ Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import unittest -import gtsam import numpy as np from gtsam.symbol_shorthand import K, L, P from gtsam.utils.test_case import GtsamTestCase +import gtsam + class TestCal3Unified(GtsamTestCase): @@ -47,9 +48,10 @@ class TestCal3Unified(GtsamTestCase): camera1 = gtsam.PinholeCameraCal3Unified(pose1, cls.stereographic) camera2 = gtsam.PinholeCameraCal3Unified(pose2, cls.stereographic) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = [pose1, pose2] - cls.cameras = [camera1, camera2] - cls.measurements = [k.project(cls.origin) for k in cls.cameras] + cls.poses = gtsam.Pose3Vector([pose1, pose2]) + cls.cameras = gtsam.CameraSetCal3Unified([camera1, camera2]) + cls.measurements = gtsam.Point2Vector( + [k.project(cls.origin) for k in cls.cameras]) def test_Cal3Unified(self): K = gtsam.Cal3Unified() @@ -106,7 +108,7 @@ class TestCal3Unified(GtsamTestCase): state = gtsam.Values() measured = self.img_point noise_model = gtsam.noiseModel.Isotropic.Sigma(2, 1) - camera_key, pose_key, landmark_key = K(0), P(0), L(0) + camera_key, pose_key, landmark_key = K(0), P(0), L(0) k = self.stereographic state.insert_cal3unified(camera_key, k) state.insert_pose3(pose_key, gtsam.Pose3()) @@ -141,7 +143,7 @@ class TestCal3Unified(GtsamTestCase): Dcal = np.zeros((2, 10), order='F') Dp = np.zeros((2, 2), order='F') camera.calibrate(img_point, Dcal, Dp) - + self.gtsamAssertEquals(Dcal, np.array( [[ 0., 0., 0., -1., 0., 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., -1., 0., 0., 0., 0., 0.]])) @@ -157,7 +159,7 @@ class TestCal3Unified(GtsamTestCase): def test_triangulation_rectify(self): """Estimate spatial point from image measurements using rectification""" - rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)] + rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]) shared_cal = gtsam.Cal3_S2() triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False) self.gtsamAssertEquals(triangulated, self.origin) diff --git a/python/gtsam/tests/test_DsfTrackGenerator.py b/python/gtsam/tests/test_DsfTrackGenerator.py index be6aa0796..681b39c6d 100644 --- a/python/gtsam/tests/test_DsfTrackGenerator.py +++ b/python/gtsam/tests/test_DsfTrackGenerator.py @@ -23,14 +23,14 @@ class TestDsfTrackGenerator(GtsamTestCase): kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]])) kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]])) - keypoints_list 
= [] + keypoints_list = gtsam.KeypointsVector() keypoints_list.append(kps_i0) keypoints_list.append(kps_i1) keypoints_list.append(kps_i2) # For each image pair (i1,i2), we provide a (K,2) matrix # of corresponding image indices (k1,k2). - matches_dict = {} + matches_dict = gtsam.MatchIndicesMap() matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]]) matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]]) @@ -86,7 +86,7 @@ class TestSfmTrack2d(GtsamTestCase): def test_sfm_track_2d_constructor(self) -> None: """Test construction of 2D SfM track.""" - measurements = [] + measurements = gtsam.SfmMeasurementVector() measurements.append((0, Point2(10, 20))) track = SfmTrack2d(measurements=measurements) track.measurement(0) diff --git a/python/gtsam/tests/test_FixedLagSmootherExample.py b/python/gtsam/tests/test_FixedLagSmootherExample.py index 412abe987..05256f358 100644 --- a/python/gtsam/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam/tests/test_FixedLagSmootherExample.py @@ -37,7 +37,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # that will be sent to the smoothers new_factors = gtsam.NonlinearFactorGraph() new_values = gtsam.Values() - new_timestamps = {} + new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap() # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) @@ -48,7 +48,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps[X1] = 0.0 + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 @@ -78,7 +78,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps[current_key] = time + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. Position is time[s] * diff --git a/python/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py index d96f28747..4d88b4214 100644 --- a/python/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -14,11 +14,12 @@ from __future__ import print_function import unittest -import gtsam import numpy as np from gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase +import gtsam + def create_graph(): """Create a basic linear factor graph for testing""" @@ -98,7 +99,7 @@ class TestGaussianFactorGraph(GtsamTestCase): bn = gfg.eliminateSequential(ordering) self.assertEqual(bn.size(), 3) - keyVector = [] + keyVector = gtsam.KeyVector() keyVector.append(keys[2]) #TODO(Varun) Below code causes segfault in Debug config # ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(gfg, keyVector) diff --git a/python/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py index 9ec33ad8a..4c6f8db78 100644 --- a/python/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -13,11 +13,12 @@ Author: Frank Dellaert import unittest -import gtsam import numpy as np -from gtsam import Rot3 from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import Rot3 + KEY = 0 MODEL = gtsam.noiseModel.Unit.Create(3) @@ -29,9 +30,11 @@ R = Rot3.Expmap(np.array([0.1, 0, 0])) class TestKarcherMean(GtsamTestCase): def test_find(self): - # Check that optimizing for Karcher mean (which minimizes Between distance) - # gets correct result. 
- rotations = [R, R.inverse()] + """ + Check that optimizing for Karcher mean (which minimizes Between distance) + gets correct result. + """ + rotations = gtsam.Rot3Vector([R, R.inverse()]) expected = Rot3() actual = gtsam.FindKarcherMean(rotations) self.gtsamAssertEquals(expected, actual) @@ -42,7 +45,7 @@ class TestKarcherMean(GtsamTestCase): a2Rb2 = Rot3() a3Rb3 = Rot3() - aRb_list = [a1Rb1, a2Rb2, a3Rb3] + aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3]) aRb_expected = Rot3() aRb = gtsam.FindKarcherMean(aRb_list) @@ -58,7 +61,9 @@ class TestKarcherMean(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) - keys = [1, 2] + keys = gtsam.KeyVector() + keys.append(1) + keys.append(2) graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() @@ -67,7 +72,9 @@ class TestKarcherMean(GtsamTestCase): expected = Rot3() result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() - actual = gtsam.FindKarcherMean([result.atRot3(1), result.atRot3(2)]) + actual = gtsam.FindKarcherMean( + gtsam.Rot3Vector([result.atRot3(1), + result.atRot3(2)])) self.gtsamAssertEquals(expected, actual) self.gtsamAssertEquals( R12, result.atRot3(1).between(result.atRot3(2))) diff --git a/python/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py index f364f55e3..69eb02b62 100644 --- a/python/gtsam/tests/test_Pose2.py +++ b/python/gtsam/tests/test_Pose2.py @@ -12,9 +12,10 @@ import math import unittest import numpy as np -from gtsam import Point2, Pose2 from gtsam.utils.test_case import GtsamTestCase +from gtsam import Point2, Point2Pairs, Pose2 + class TestPose2(GtsamTestCase): """Test selected Pose2 methods.""" @@ -82,7 +83,7 @@ class TestPose2(GtsamTestCase): ] # fmt: on - ab_pairs = list(zip(pts_a, pts_b)) + ab_pairs = Point2Pairs(list(zip(pts_a, pts_b))) aTb = Pose2.Align(ab_pairs) self.assertIsNotNone(aTb) diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index 40bf9a87f..5d1761b08 100644 --- a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -12,11 +12,12 @@ Author: Frank Dellaert, Duy Nguyen Ta import math import unittest -import gtsam import numpy as np -from gtsam import Point3, Pose3, Rot3 from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import Point3, Pose3, Rot3 + def numerical_derivative_pose(pose, method, delta=1e-5): jacobian = np.zeros((6, 6)) @@ -222,7 +223,7 @@ class TestPose3(GtsamTestCase): sTt = Pose3(Rot3.Rodrigues(0, 0, -math.pi), Point3(2, 4, 0)) transformed = sTt.transformTo(square) - st_pairs = [] + st_pairs = gtsam.Point3Pairs() for j in range(4): st_pairs.append((square[:,j], transformed[:,j])) diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py index 036ea401c..c9709ecf9 100644 --- a/python/gtsam/tests/test_ShonanAveraging.py +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -12,12 +12,13 @@ Author: Frank Dellaert import unittest -import gtsam import numpy as np +from gtsam.utils.test_case import GtsamTestCase + +import gtsam from gtsam import (BetweenFactorPose2, LevenbergMarquardtParams, Pose2, Rot2, ShonanAveraging2, ShonanAveraging3, ShonanAveragingParameters2, ShonanAveragingParameters3) -from gtsam.utils.test_case import GtsamTestCase DEFAULT_PARAMS = ShonanAveragingParameters3( gtsam.LevenbergMarquardtParams.CeresDefaults() @@ -176,7 +177,7 @@ class TestShonanAveraging(GtsamTestCase): shonan_params.setCertifyOptimality(True) noise_model = 
gtsam.noiseModel.Unit.Create(3) - between_factors = [] + between_factors = gtsam.BetweenFactorPose2s() for (i1, i2), i2Ri1 in i2Ri1_dict.items(): i2Ti1 = Pose2(i2Ri1, np.zeros(2)) between_factors.append( diff --git a/python/gtsam/tests/test_Sim2.py b/python/gtsam/tests/test_Sim2.py index 96ba6eb1e..50a8cf156 100644 --- a/python/gtsam/tests/test_Sim2.py +++ b/python/gtsam/tests/test_Sim2.py @@ -12,9 +12,10 @@ Author: John Lambert import unittest import numpy as np -from gtsam import Pose2, Rot2, Similarity2 from gtsam.utils.test_case import GtsamTestCase +from gtsam import Pose2, Pose2Pairs, Rot2, Similarity2 + class TestSim2(GtsamTestCase): """Test selected Sim2 methods.""" @@ -46,7 +47,7 @@ class TestSim2(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = list(zip(wToi_list, eToi_list)) + we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity2.Align(we_pairs) @@ -81,7 +82,7 @@ class TestSim2(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = list(zip(wToi_list, eToi_list)) + we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity2.Align(we_pairs) @@ -119,7 +120,7 @@ class TestSim2(GtsamTestCase): bTi_list = [bTi0, bTi1, bTi2, bTi3] - ab_pairs = list(zip(aTi_list, bTi_list)) + ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list))) # Recover the transformation wSe (i.e. world_S_egovehicle) aSb = Similarity2.Align(ab_pairs) diff --git a/python/gtsam/tests/test_Sim3.py b/python/gtsam/tests/test_Sim3.py index c3fd9e909..b5ae25221 100644 --- a/python/gtsam/tests/test_Sim3.py +++ b/python/gtsam/tests/test_Sim3.py @@ -12,11 +12,12 @@ Author: John Lambert import unittest from typing import List, Optional -import gtsam import numpy as np -from gtsam import Point3, Pose3, Rot3, Similarity3 from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import Point3, Pose3, Rot3, Similarity3 + class TestSim3(GtsamTestCase): """Test selected Sim3 methods.""" @@ -48,7 +49,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = list(zip(wToi_list, eToi_list)) + we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -83,7 +84,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = list(zip(wToi_list, eToi_list)) + we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -121,7 +122,7 @@ class TestSim3(GtsamTestCase): bTi_list = [bTi0, bTi1, bTi2, bTi3] - ab_pairs = list(zip(aTi_list, bTi_list)) + ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list))) # Recover the transformation wSe (i.e. 
world_S_egovehicle) aSb = Similarity3.Align(ab_pairs) @@ -688,7 +689,7 @@ def align_poses_sim3(aTi_list: List[Pose3], bTi_list: List[Pose3]) -> Similarity assert len(aTi_list) == len(bTi_list) assert n_to_align >= 2, "SIM(3) alignment uses at least 2 frames" - ab_pairs = list(zip(aTi_list, bTi_list)) + ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list))) aSb = Similarity3.Align(ab_pairs) diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py index 25d293e6f..703392d67 100644 --- a/python/gtsam/tests/test_Triangulation.py +++ b/python/gtsam/tests/test_Triangulation.py @@ -12,13 +12,14 @@ Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert import unittest from typing import Iterable, List, Optional, Tuple, Union -import gtsam import numpy as np +from gtsam.utils.test_case import GtsamTestCase + +import gtsam from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2, CameraSetCal3Bundler, PinholeCameraCal3_S2, PinholeCameraCal3Bundler, Point2, Point3, Pose3, Rot3, TriangulationParameters, TriangulationResult) -from gtsam.utils.test_case import GtsamTestCase UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2) @@ -34,7 +35,9 @@ class TestTriangulationExample(GtsamTestCase): # create second camera 1 meter to the right of first camera pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) # twoPoses - self.poses = [pose1, pose2] + self.poses = gtsam.Pose3Vector() + self.poses.append(pose1) + self.poses.append(pose2) # landmark ~5 meters infront of camera self.landmark = Point3(5, 0.5, 1.2) @@ -64,7 +67,7 @@ class TestTriangulationExample(GtsamTestCase): cameras = camera_set() else: cameras = [] - measurements = [] + measurements = gtsam.Point2Vector() for k, pose in zip(cal_params, self.poses): K = calibration(*k) @@ -93,7 +96,7 @@ class TestTriangulationExample(GtsamTestCase): self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) # Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) - measurements_noisy = [] + measurements_noisy = gtsam.Point2Vector() measurements_noisy.append(measurements[0] - np.array([0.1, 0.5])) measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3])) @@ -160,8 +163,8 @@ class TestTriangulationExample(GtsamTestCase): z2: Point2 = camera2.project(landmark) z3: Point2 = camera3.project(landmark) - poses = [pose1, pose2, pose3] - measurements = [z1, z2, z3] + poses = gtsam.Pose3Vector([pose1, pose2, pose3]) + measurements = gtsam.Point2Vector([z1, z2, z3]) # noise free, so should give exactly the landmark actual = gtsam.triangulatePoint3(poses, @@ -226,7 +229,7 @@ class TestTriangulationExample(GtsamTestCase): cameras.append(camera1) cameras.append(camera2) - measurements = [] + measurements = gtsam.Point2Vector() measurements.append(z1) measurements.append(z2) From 1f951f7dfe1b33586b5a7df170bb2304aafc3cc2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 20 Jun 2023 10:06:12 -0400 Subject: [PATCH 65/78] add typedefs to ensure backwards compatibility --- python/gtsam/__init__.py | 42 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py index d00e47b65..80d11ef54 100644 --- a/python/gtsam/__init__.py +++ b/python/gtsam/__init__.py @@ -4,9 +4,49 @@ import sys +from gtsam.utils import findExampleDataFile + from gtsam import gtsam, utils from gtsam.gtsam import * -from gtsam.utils import findExampleDataFile + +#### Typedefs to allow for backwards compatibility 
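+# These aliases keep pre-existing user code running now that the wrapped APIs
+# accept and return native Python containers (converted by pybind11/stl.h).
+# For example, the older spellings exercised in the tests still work unchanged:
+#   ab_pairs = gtsam.Point2Pairs(list(zip(pts_a, pts_b)))
+#   aTb = gtsam.Pose2.Align(ab_pairs)
+#   new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap()
+#   new_timestamps.insert((X1, 0.0))  # dict subclass below keeps the old insert() API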
+#TODO(Varun) deprecate in future release +# gtsam +KeyVector = list +# base +IndexPairSetMap = dict +IndexPairVector = list +# geometry +Point2Vector = list +Pose3Vector = list +Rot3Vector = list +Point2Pairs = list +Point3Pairs = list +Pose2Pairs = list +Pose3Pairs = list +# sfm +BinaryMeasurementsPoint3 = list +BinaryMeasurementsUnit3 = list +BinaryMeasurementsRot3 = list +KeyPairDoubleMap = dict +SfmTrack2dVector = list +SfmTracks = list +SfmCameras = list +SfmMeasurementVector = list +MatchIndicesMap = dict +KeypointsVector = list +# slam +BetweenFactorPose3s = list +BetweenFactorPose2s = list + + +class FixedLagSmootherKeyTimestampMap(dict): + + def insert(self, key_value): + self[key_value[0]] = key_value[1] + + +#### End typedefs def _init(): From ab3e3773ec0ecae3905655d11c1eb82302ceeb57 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Wed, 21 Jun 2023 13:25:36 -0500 Subject: [PATCH 66/78] removed boost from TableFactor and added guards to testSerializationSlam --- gtsam/discrete/TableFactor.cpp | 3 +-- tests/testSerializationSlam.cpp | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/gtsam/discrete/TableFactor.cpp b/gtsam/discrete/TableFactor.cpp index acb59a8be..5fe3cd9d1 100644 --- a/gtsam/discrete/TableFactor.cpp +++ b/gtsam/discrete/TableFactor.cpp @@ -21,7 +21,6 @@ #include #include -#include #include using namespace std; @@ -203,7 +202,7 @@ void TableFactor::print(const string& s, const KeyFormatter& formatter) const { cout << s; cout << " f["; for (auto&& key : keys()) - cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key); + cout << " (" << formatter(key) << "," << cardinality(key) << "),"; cout << " ]" << endl; for (SparseIt it(sparse_table_); it; ++it) { DiscreteValues assignment = findAssignments(it.index()); diff --git a/tests/testSerializationSlam.cpp b/tests/testSerializationSlam.cpp index c4170b108..6b504a810 100644 --- a/tests/testSerializationSlam.cpp +++ b/tests/testSerializationSlam.cpp @@ -52,6 +52,8 @@ #include #include +#ifdef GTSAM_USE_BOOST_FEATURES + #include #include @@ -674,6 +676,8 @@ TEST(SubgraphSolver, Solves) { } } +#endif + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ From 6f94f52f411b7893509673b6eb7198774a862618 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 21 Jun 2023 16:02:18 -0400 Subject: [PATCH 67/78] modernize main tests --- python/gtsam/tests/test_Cal3Fisheye.py | 7 +++---- python/gtsam/tests/test_Cal3Unified.py | 9 ++++----- python/gtsam/tests/test_DsfTrackGenerator.py | 6 +++--- .../gtsam/tests/test_FixedLagSmootherExample.py | 7 +++---- python/gtsam/tests/test_GaussianFactorGraph.py | 12 ++++++------ python/gtsam/tests/test_KarcherMeanFactor.py | 16 +++++----------- python/gtsam/tests/test_Pose2.py | 7 ++++--- python/gtsam/tests/test_Pose3.py | 2 +- python/gtsam/tests/test_ShonanAveraging.py | 7 +++---- python/gtsam/tests/test_Sim2.py | 12 ++++++------ python/gtsam/tests/test_Sim3.py | 14 +++++++------- python/gtsam/tests/test_Triangulation.py | 14 ++++++-------- 12 files changed, 51 insertions(+), 62 deletions(-) diff --git a/python/gtsam/tests/test_Cal3Fisheye.py b/python/gtsam/tests/test_Cal3Fisheye.py index 3e4c58ccb..50a15dc6e 100644 --- a/python/gtsam/tests/test_Cal3Fisheye.py +++ b/python/gtsam/tests/test_Cal3Fisheye.py @@ -51,10 +51,9 @@ class TestCal3Fisheye(GtsamTestCase): camera1 = 
gtsam.PinholeCameraCal3Fisheye(pose1) camera2 = gtsam.PinholeCameraCal3Fisheye(pose2) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = gtsam.Pose3Vector([pose1, pose2]) - cls.cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2]) - cls.measurements = gtsam.Point2Vector( - [k.project(cls.origin) for k in cls.cameras]) + cls.poses = [pose1, pose2] + cls.cameras = [camera1, camera2] + cls.measurements = [k.project(cls.origin) for k in cls.cameras] def test_Cal3Fisheye(self): K = gtsam.Cal3Fisheye() diff --git a/python/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py index d0cb1a683..fce741999 100644 --- a/python/gtsam/tests/test_Cal3Unified.py +++ b/python/gtsam/tests/test_Cal3Unified.py @@ -48,10 +48,9 @@ class TestCal3Unified(GtsamTestCase): camera1 = gtsam.PinholeCameraCal3Unified(pose1, cls.stereographic) camera2 = gtsam.PinholeCameraCal3Unified(pose2, cls.stereographic) cls.origin = np.array([0.0, 0.0, 0.0]) - cls.poses = gtsam.Pose3Vector([pose1, pose2]) - cls.cameras = gtsam.CameraSetCal3Unified([camera1, camera2]) - cls.measurements = gtsam.Point2Vector( - [k.project(cls.origin) for k in cls.cameras]) + cls.poses = [pose1, pose2] + cls.cameras = [camera1, camera2] + cls.measurements = [k.project(cls.origin) for k in cls.cameras] def test_Cal3Unified(self): K = gtsam.Cal3Unified() @@ -159,7 +158,7 @@ class TestCal3Unified(GtsamTestCase): def test_triangulation_rectify(self): """Estimate spatial point from image measurements using rectification""" - rectified = gtsam.Point2Vector([k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)]) + rectified = [k.calibration().calibrate(pt) for k, pt in zip(self.cameras, self.measurements)] shared_cal = gtsam.Cal3_S2() triangulated = gtsam.triangulatePoint3(self.poses, shared_cal, rectified, rank_tol=1e-9, optimize=False) self.gtsamAssertEquals(triangulated, self.origin) diff --git a/python/gtsam/tests/test_DsfTrackGenerator.py b/python/gtsam/tests/test_DsfTrackGenerator.py index 681b39c6d..be6aa0796 100644 --- a/python/gtsam/tests/test_DsfTrackGenerator.py +++ b/python/gtsam/tests/test_DsfTrackGenerator.py @@ -23,14 +23,14 @@ class TestDsfTrackGenerator(GtsamTestCase): kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]])) kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]])) - keypoints_list = gtsam.KeypointsVector() + keypoints_list = [] keypoints_list.append(kps_i0) keypoints_list.append(kps_i1) keypoints_list.append(kps_i2) # For each image pair (i1,i2), we provide a (K,2) matrix # of corresponding image indices (k1,k2). 
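+        # A plain dict keyed by gtsam.IndexPair is accepted here because
+        # MatchIndicesMap is now just an alias for dict (see gtsam/__init__.py).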
- matches_dict = gtsam.MatchIndicesMap() + matches_dict = {} matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]]) matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]]) @@ -86,7 +86,7 @@ class TestSfmTrack2d(GtsamTestCase): def test_sfm_track_2d_constructor(self) -> None: """Test construction of 2D SfM track.""" - measurements = gtsam.SfmMeasurementVector() + measurements = [] measurements.append((0, Point2(10, 20))) track = SfmTrack2d(measurements=measurements) track.measurement(0) diff --git a/python/gtsam/tests/test_FixedLagSmootherExample.py b/python/gtsam/tests/test_FixedLagSmootherExample.py index 05256f358..64914ab38 100644 --- a/python/gtsam/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam/tests/test_FixedLagSmootherExample.py @@ -14,7 +14,6 @@ import numpy as np from gtsam.utils.test_case import GtsamTestCase import gtsam -import gtsam_unstable class TestFixedLagSmootherExample(GtsamTestCase): @@ -37,7 +36,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): # that will be sent to the smoothers new_factors = gtsam.NonlinearFactorGraph() new_values = gtsam.Values() - new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap() + new_timestamps = {} # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) @@ -48,7 +47,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert((X1, 0.0)) + new_timestamps[X1] = 0.0 delta_time = 0.25 time = 0.25 @@ -78,7 +77,7 @@ class TestFixedLagSmootherExample(GtsamTestCase): current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert((current_key, time)) + new_timestamps[current_key] = time # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. Position is time[s] * diff --git a/python/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py index 4d88b4214..326f62d5a 100644 --- a/python/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -41,6 +41,7 @@ def create_graph(): class TestGaussianFactorGraph(GtsamTestCase): """Tests for Gaussian Factor Graphs.""" + def test_fg(self): """Test solving a linear factor graph""" graph, X = create_graph() @@ -99,12 +100,11 @@ class TestGaussianFactorGraph(GtsamTestCase): bn = gfg.eliminateSequential(ordering) self.assertEqual(bn.size(), 3) - keyVector = gtsam.KeyVector() - keyVector.append(keys[2]) - #TODO(Varun) Below code causes segfault in Debug config - # ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph(gfg, keyVector) - # bn = gfg.eliminateSequential(ordering) - # self.assertEqual(bn.size(), 3) + keyVector = [keys[2]] + ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph( + gfg, keyVector) + bn = gfg.eliminateSequential(ordering) + self.assertEqual(bn.size(), 3) if __name__ == '__main__': diff --git a/python/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py index 4c6f8db78..0bc942341 100644 --- a/python/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -22,7 +22,6 @@ from gtsam import Rot3 KEY = 0 MODEL = gtsam.noiseModel.Unit.Create(3) - # Rot3 version R = Rot3.Expmap(np.array([0.1, 0, 0])) @@ -34,7 +33,7 @@ class TestKarcherMean(GtsamTestCase): Check that optimizing for Karcher mean (which minimizes Between distance) gets correct result. 
""" - rotations = gtsam.Rot3Vector([R, R.inverse()]) + rotations = [R, R.inverse()] expected = Rot3() actual = gtsam.FindKarcherMean(rotations) self.gtsamAssertEquals(expected, actual) @@ -45,7 +44,7 @@ class TestKarcherMean(GtsamTestCase): a2Rb2 = Rot3() a3Rb3 = Rot3() - aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3]) + aRb_list = [a1Rb1, a2Rb2, a3Rb3] aRb_expected = Rot3() aRb = gtsam.FindKarcherMean(aRb_list) @@ -61,9 +60,7 @@ class TestKarcherMean(GtsamTestCase): graph = gtsam.NonlinearFactorGraph() R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) - keys = gtsam.KeyVector() - keys.append(1) - keys.append(2) + keys = [1, 2] graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() @@ -72,12 +69,9 @@ class TestKarcherMean(GtsamTestCase): expected = Rot3() result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() - actual = gtsam.FindKarcherMean( - gtsam.Rot3Vector([result.atRot3(1), - result.atRot3(2)])) + actual = gtsam.FindKarcherMean([result.atRot3(1), result.atRot3(2)]) self.gtsamAssertEquals(expected, actual) - self.gtsamAssertEquals( - R12, result.atRot3(1).between(result.atRot3(2))) + self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2))) if __name__ == "__main__": diff --git a/python/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py index 69eb02b62..d29448194 100644 --- a/python/gtsam/tests/test_Pose2.py +++ b/python/gtsam/tests/test_Pose2.py @@ -19,6 +19,7 @@ from gtsam import Point2, Point2Pairs, Pose2 class TestPose2(GtsamTestCase): """Test selected Pose2 methods.""" + def test_adjoint(self) -> None: """Test adjoint method.""" xi = np.array([1, 2, 3]) @@ -28,7 +29,7 @@ class TestPose2(GtsamTestCase): def test_transformTo(self): """Test transformTo method.""" - pose = Pose2(2, 4, -math.pi/2) + pose = Pose2(2, 4, -math.pi / 2) actual = pose.transformTo(Point2(3, 2)) expected = Point2(2, 1) self.gtsamAssertEquals(actual, expected, 1e-6) @@ -42,7 +43,7 @@ class TestPose2(GtsamTestCase): def test_transformFrom(self): """Test transformFrom method.""" - pose = Pose2(2, 4, -math.pi/2) + pose = Pose2(2, 4, -math.pi / 2) actual = pose.transformFrom(Point2(2, 1)) expected = Point2(3, 2) self.gtsamAssertEquals(actual, expected, 1e-6) @@ -83,7 +84,7 @@ class TestPose2(GtsamTestCase): ] # fmt: on - ab_pairs = Point2Pairs(list(zip(pts_a, pts_b))) + ab_pairs = list(zip(pts_a, pts_b)) aTb = Pose2.Align(ab_pairs) self.assertIsNotNone(aTb) diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index 5d1761b08..32b3ad47f 100644 --- a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -223,7 +223,7 @@ class TestPose3(GtsamTestCase): sTt = Pose3(Rot3.Rodrigues(0, 0, -math.pi), Point3(2, 4, 0)) transformed = sTt.transformTo(square) - st_pairs = gtsam.Point3Pairs() + st_pairs = [] for j in range(4): st_pairs.append((square[:,j], transformed[:,j])) diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py index c9709ecf9..845f6cf1c 100644 --- a/python/gtsam/tests/test_ShonanAveraging.py +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -140,7 +140,6 @@ class TestShonanAveraging(GtsamTestCase): self.assertAlmostEqual(3.0756, shonan.cost(initial), places=3) result, _lambdaMin = shonan.run(initial, 3, 3) self.assertAlmostEqual(0.0015, shonan.cost(result), places=3) - def test_constructorBetweenFactorPose2s(self) -> None: """Check if ShonanAveraging2 constructor works when not initialized from g2o file. 
@@ -177,7 +176,7 @@ class TestShonanAveraging(GtsamTestCase): shonan_params.setCertifyOptimality(True) noise_model = gtsam.noiseModel.Unit.Create(3) - between_factors = gtsam.BetweenFactorPose2s() + between_factors = [] for (i1, i2), i2Ri1 in i2Ri1_dict.items(): i2Ti1 = Pose2(i2Ri1, np.zeros(2)) between_factors.append( @@ -190,11 +189,11 @@ class TestShonanAveraging(GtsamTestCase): wRi_list = [result_values.atRot2(i) for i in range(num_images)] thetas_deg = np.array([wRi.degrees() for wRi in wRi_list]) - + # map all angles to [0,360) thetas_deg = thetas_deg % 360 thetas_deg -= thetas_deg[0] - + expected_thetas_deg = np.array([0.0, 90.0, 0.0]) np.testing.assert_allclose(thetas_deg, expected_thetas_deg, atol=0.1) diff --git a/python/gtsam/tests/test_Sim2.py b/python/gtsam/tests/test_Sim2.py index 50a8cf156..5501322c3 100644 --- a/python/gtsam/tests/test_Sim2.py +++ b/python/gtsam/tests/test_Sim2.py @@ -14,7 +14,7 @@ import unittest import numpy as np from gtsam.utils.test_case import GtsamTestCase -from gtsam import Pose2, Pose2Pairs, Rot2, Similarity2 +from gtsam import Pose2, Rot2, Similarity2 class TestSim2(GtsamTestCase): @@ -47,7 +47,7 @@ class TestSim2(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) + we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity2.Align(we_pairs) @@ -56,7 +56,7 @@ class TestSim2(GtsamTestCase): self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) def test_align_poses_along_straight_line_gauge(self): - """Test if Align Pose3Pairs method can account for gauge ambiguity. + """Test if Pose2 Align method can account for gauge ambiguity. Scenario: 3 object poses @@ -82,7 +82,7 @@ class TestSim2(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) + we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity2.Align(we_pairs) @@ -91,7 +91,7 @@ class TestSim2(GtsamTestCase): self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) def test_align_poses_scaled_squares(self): - """Test if Align Pose2Pairs method can account for gauge ambiguity. + """Test if Align method can account for gauge ambiguity. Make sure a big and small square can be aligned. The u's represent a big square (10x10), and v's represents a small square (4x4). @@ -120,7 +120,7 @@ class TestSim2(GtsamTestCase): bTi_list = [bTi0, bTi1, bTi2, bTi3] - ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list))) + ab_pairs = list(zip(aTi_list, bTi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) aSb = Similarity2.Align(ab_pairs) diff --git a/python/gtsam/tests/test_Sim3.py b/python/gtsam/tests/test_Sim3.py index b5ae25221..7723738c8 100644 --- a/python/gtsam/tests/test_Sim3.py +++ b/python/gtsam/tests/test_Sim3.py @@ -23,7 +23,7 @@ class TestSim3(GtsamTestCase): """Test selected Sim3 methods.""" def test_align_poses_along_straight_line(self): - """Test Align Pose3Pairs method. + """Test Pose3 Align method. Scenario: 3 object poses @@ -49,7 +49,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) + we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. 
world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -58,7 +58,7 @@ class TestSim3(GtsamTestCase): self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) def test_align_poses_along_straight_line_gauge(self): - """Test if Align Pose3Pairs method can account for gauge ambiguity. + """Test if Pose3 Align method can account for gauge ambiguity. Scenario: 3 object poses @@ -84,7 +84,7 @@ class TestSim3(GtsamTestCase): wToi_list = [wTo0, wTo1, wTo2] - we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) + we_pairs = list(zip(wToi_list, eToi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) wSe = Similarity3.Align(we_pairs) @@ -93,7 +93,7 @@ class TestSim3(GtsamTestCase): self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) def test_align_poses_scaled_squares(self): - """Test if Align Pose3Pairs method can account for gauge ambiguity. + """Test if Pose3 Align method can account for gauge ambiguity. Make sure a big and small square can be aligned. The u's represent a big square (10x10), and v's represents a small square (4x4). @@ -122,7 +122,7 @@ class TestSim3(GtsamTestCase): bTi_list = [bTi0, bTi1, bTi2, bTi3] - ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list))) + ab_pairs = list(zip(aTi_list, bTi_list)) # Recover the transformation wSe (i.e. world_S_egovehicle) aSb = Similarity3.Align(ab_pairs) @@ -689,7 +689,7 @@ def align_poses_sim3(aTi_list: List[Pose3], bTi_list: List[Pose3]) -> Similarity assert len(aTi_list) == len(bTi_list) assert n_to_align >= 2, "SIM(3) alignment uses at least 2 frames" - ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list))) + ab_pairs = list(zip(aTi_list, bTi_list)) aSb = Similarity3.Align(ab_pairs) diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py index 703392d67..8e245093b 100644 --- a/python/gtsam/tests/test_Triangulation.py +++ b/python/gtsam/tests/test_Triangulation.py @@ -35,9 +35,7 @@ class TestTriangulationExample(GtsamTestCase): # create second camera 1 meter to the right of first camera pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) # twoPoses - self.poses = gtsam.Pose3Vector() - self.poses.append(pose1) - self.poses.append(pose2) + self.poses = [pose1, pose2] # landmark ~5 meters infront of camera self.landmark = Point3(5, 0.5, 1.2) @@ -67,7 +65,7 @@ class TestTriangulationExample(GtsamTestCase): cameras = camera_set() else: cameras = [] - measurements = gtsam.Point2Vector() + measurements = [] for k, pose in zip(cal_params, self.poses): K = calibration(*k) @@ -96,7 +94,7 @@ class TestTriangulationExample(GtsamTestCase): self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) # Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) - measurements_noisy = gtsam.Point2Vector() + measurements_noisy = [] measurements_noisy.append(measurements[0] - np.array([0.1, 0.5])) measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3])) @@ -163,8 +161,8 @@ class TestTriangulationExample(GtsamTestCase): z2: Point2 = camera2.project(landmark) z3: Point2 = camera3.project(landmark) - poses = gtsam.Pose3Vector([pose1, pose2, pose3]) - measurements = gtsam.Point2Vector([z1, z2, z3]) + poses = [pose1, pose2, pose3] + measurements = [z1, z2, z3] # noise free, so should give exactly the landmark actual = gtsam.triangulatePoint3(poses, @@ -229,7 +227,7 @@ class TestTriangulationExample(GtsamTestCase): cameras.append(camera1) cameras.append(camera2) - measurements = gtsam.Point2Vector() + measurements = [] measurements.append(z1) 
measurements.append(z2) From 0ac12c9d3260fc30d429deeb06dd779f632e09ad Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 21 Jun 2023 16:02:37 -0400 Subject: [PATCH 68/78] add tests for Python wrapper backwards compatibility --- python/gtsam/__init__.py | 2 +- .../tests/test_backwards_compatibility.py | 897 ++++++++++++++++++ 2 files changed, 898 insertions(+), 1 deletion(-) create mode 100644 python/gtsam/tests/test_backwards_compatibility.py diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py index 80d11ef54..904afd2e0 100644 --- a/python/gtsam/__init__.py +++ b/python/gtsam/__init__.py @@ -41,7 +41,7 @@ BetweenFactorPose2s = list class FixedLagSmootherKeyTimestampMap(dict): - + """Class to provide backwards compatibility""" def insert(self, key_value): self[key_value[0]] = key_value[1] diff --git a/python/gtsam/tests/test_backwards_compatibility.py b/python/gtsam/tests/test_backwards_compatibility.py new file mode 100644 index 000000000..414b65e8c --- /dev/null +++ b/python/gtsam/tests/test_backwards_compatibility.py @@ -0,0 +1,897 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests to ensure backwards compatibility of the Python wrapper. +Author: Varun Agrawal +""" +import unittest +from typing import Iterable, List, Optional, Tuple, Union + +import numpy as np +from gtsam.gtsfm import Keypoints +from gtsam.symbol_shorthand import X +from gtsam.utils.test_case import GtsamTestCase + +import gtsam +from gtsam import (BetweenFactorPose2, Cal3_S2, Cal3Bundler, CameraSetCal3_S2, + CameraSetCal3Bundler, IndexPair, LevenbergMarquardtParams, + PinholeCameraCal3_S2, PinholeCameraCal3Bundler, Point2, + Point2Pairs, Point3, Pose2, Pose2Pairs, Pose3, Rot2, Rot3, + SfmTrack2d, ShonanAveraging2, ShonanAveragingParameters2, + Similarity2, Similarity3, TriangulationParameters, + TriangulationResult) + +UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2) + + +class TestBackwardsCompatibility(GtsamTestCase): + """Tests for the backwards compatibility for the Python wrapper.""" + + def setUp(self): + """Setup test fixtures""" + p1 = [-1.0, 0.0, -1.0] + p2 = [1.0, 0.0, -1.0] + q1 = Rot3(1.0, 0.0, 0.0, 0.0) + q2 = Rot3(1.0, 0.0, 0.0, 0.0) + pose1 = Pose3(q1, p1) + pose2 = Pose3(q2, p2) + camera1 = gtsam.PinholeCameraCal3Fisheye(pose1) + camera2 = gtsam.PinholeCameraCal3Fisheye(pose2) + self.origin = np.array([0.0, 0.0, 0.0]) + self.poses = gtsam.Pose3Vector([pose1, pose2]) + + self.fisheye_cameras = gtsam.CameraSetCal3Fisheye([camera1, camera2]) + self.fisheye_measurements = gtsam.Point2Vector( + [k.project(self.origin) for k in self.fisheye_cameras]) + + fx, fy, s, u0, v0 = 2, 2, 0, 0, 0 + k1, k2, p1, p2 = 0, 0, 0, 0 + xi = 1 + self.stereographic = gtsam.Cal3Unified(fx, fy, s, u0, v0, k1, k2, p1, + p2, xi) + camera1 = gtsam.PinholeCameraCal3Unified(pose1, self.stereographic) + camera2 = gtsam.PinholeCameraCal3Unified(pose2, self.stereographic) + self.unified_cameras = gtsam.CameraSetCal3Unified([camera1, camera2]) + self.unified_measurements = gtsam.Point2Vector( + [k.project(self.origin) for k in self.unified_cameras]) + + ## Set up two camera poses + # Looking along X-axis, 1 meter above ground plane (x-y) + pose1 = Pose3(UPRIGHT, Point3(0, 0, 1)) + + # create second camera 1 meter to the right of first camera + pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) + # twoPoses + self.triangulation_poses = gtsam.Pose3Vector() + self.triangulation_poses.append(pose1) 
+ self.triangulation_poses.append(pose2) + + # landmark ~5 meters infront of camera + self.landmark = Point3(5, 0.5, 1.2) + + def test_Cal3Fisheye_triangulation_rectify(self): + """ + Estimate spatial point from image measurements using + rectification from a Cal3Fisheye camera model. + """ + rectified = gtsam.Point2Vector([ + k.calibration().calibrate(pt) + for k, pt in zip(self.fisheye_cameras, self.fisheye_measurements) + ]) + shared_cal = gtsam.Cal3_S2() + triangulated = gtsam.triangulatePoint3(self.poses, + shared_cal, + rectified, + rank_tol=1e-9, + optimize=False) + self.gtsamAssertEquals(triangulated, self.origin) + + def test_Cal3Unified_triangulation_rectify(self): + """ + Estimate spatial point from image measurements using + rectification from a Cal3Unified camera model. + """ + rectified = gtsam.Point2Vector([ + k.calibration().calibrate(pt) + for k, pt in zip(self.unified_cameras, self.unified_measurements) + ]) + shared_cal = gtsam.Cal3_S2() + triangulated = gtsam.triangulatePoint3(self.poses, + shared_cal, + rectified, + rank_tol=1e-9, + optimize=False) + self.gtsamAssertEquals(triangulated, self.origin) + + def test_track_generation(self) -> None: + """Ensures that DSF generates three tracks from measurements + in 3 images (H=200,W=400).""" + kps_i0 = Keypoints(np.array([[10.0, 20], [30, 40]])) + kps_i1 = Keypoints(np.array([[50.0, 60], [70, 80], [90, 100]])) + kps_i2 = Keypoints(np.array([[110.0, 120], [130, 140]])) + + keypoints_list = gtsam.KeypointsVector() + keypoints_list.append(kps_i0) + keypoints_list.append(kps_i1) + keypoints_list.append(kps_i2) + + # For each image pair (i1,i2), we provide a (K,2) matrix + # of corresponding image indices (k1,k2). + matches_dict = gtsam.MatchIndicesMap() + matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]]) + matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]]) + + tracks = gtsam.gtsfm.tracksFromPairwiseMatches( + matches_dict, + keypoints_list, + verbose=False, + ) + assert len(tracks) == 3 + + # Verify track 0. + track0 = tracks[0] + assert track0.numberMeasurements() == 2 + np.testing.assert_allclose(track0.measurements[0][1], Point2(10, 20)) + np.testing.assert_allclose(track0.measurements[1][1], Point2(50, 60)) + assert track0.measurements[0][0] == 0 + assert track0.measurements[1][0] == 1 + np.testing.assert_allclose( + track0.measurementMatrix(), + [ + [10, 20], + [50, 60], + ], + ) + np.testing.assert_allclose(track0.indexVector(), [0, 1]) + + # Verify track 1. + track1 = tracks[1] + np.testing.assert_allclose( + track1.measurementMatrix(), + [ + [30, 40], + [70, 80], + [130, 140], + ], + ) + np.testing.assert_allclose(track1.indexVector(), [0, 1, 2]) + + # Verify track 2. + track2 = tracks[2] + np.testing.assert_allclose( + track2.measurementMatrix(), + [ + [90, 100], + [110, 120], + ], + ) + np.testing.assert_allclose(track2.indexVector(), [1, 2]) + + def test_sfm_track_2d_constructor(self) -> None: + """Test construction of 2D SfM track.""" + measurements = gtsam.SfmMeasurementVector() + measurements.append((0, Point2(10, 20))) + track = SfmTrack2d(measurements=measurements) + track.measurement(0) + assert track.numberMeasurements() == 1 + + def test_FixedLagSmootherExample(self): + ''' + Simple test that checks for equality between C++ example + file and the Python implementation. 
See + gtsam_unstable/examples/FixedLagSmootherExample.cpp + ''' + # Define a batch fixed lag smoother, which uses + # Levenberg-Marquardt to perform the nonlinear optimization + lag = 2.0 + smoother_batch = gtsam.BatchFixedLagSmoother(lag) + + # Create containers to store the factors and linearization points + # that will be sent to the smoothers + new_factors = gtsam.NonlinearFactorGraph() + new_values = gtsam.Values() + new_timestamps = gtsam.FixedLagSmootherKeyTimestampMap() + + # Create a prior on the first pose, placing it at the origin + prior_mean = Pose2(0, 0, 0) + prior_noise = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.3, 0.3, 0.1])) + X1 = 0 + new_factors.push_back( + gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) + new_values.insert(X1, prior_mean) + new_timestamps.insert((X1, 0.0)) + + delta_time = 0.25 + time = 0.25 + + i = 0 + + ground_truth = [ + Pose2(0.995821, 0.0231012, 0.0300001), + Pose2(1.49284, 0.0457247, 0.045), + Pose2(1.98981, 0.0758879, 0.06), + Pose2(2.48627, 0.113502, 0.075), + Pose2(2.98211, 0.158558, 0.09), + Pose2(3.47722, 0.211047, 0.105), + Pose2(3.97149, 0.270956, 0.12), + Pose2(4.4648, 0.338272, 0.135), + Pose2(4.95705, 0.41298, 0.15), + Pose2(5.44812, 0.495063, 0.165), + Pose2(5.9379, 0.584503, 0.18), + ] + + # Iterates from 0.25s to 3.0s, adding 0.25s each loop + # In each iteration, the agent moves at a constant speed + # and its two odometers measure the change. The smoothed + # result is then compared to the ground truth + while time <= 3.0: + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) + + # assign current key to the current timestamp + new_timestamps.insert((current_key, time)) + + # Add a guess for this pose to the new values + # Assume that the robot moves at 2 m/s. Position is time[s] * + # 2[m/s] + current_pose = Pose2(time * 2, 0, 0) + new_values.insert(current_key, current_pose) + + # Add odometry factors from two different sources with different + # error stats + odometry_measurement_1 = Pose2(0.61, -0.08, 0.02) + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.1, 0.1, 0.05])) + new_factors.push_back( + gtsam.BetweenFactorPose2(previous_key, current_key, + odometry_measurement_1, + odometry_noise_1)) + + odometry_measurement_2 = Pose2(0.47, 0.03, 0.01) + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.05, 0.05, 0.05])) + new_factors.push_back( + gtsam.BetweenFactorPose2(previous_key, current_key, + odometry_measurement_2, + odometry_noise_2)) + + # Update the smoothers with the new factors. 
In this case, + # one iteration must pass for Levenberg-Marquardt to accurately + # estimate + if time >= 0.50: + smoother_batch.update(new_factors, new_values, new_timestamps) + + estimate = smoother_batch.calculateEstimatePose2(current_key) + self.assertTrue(estimate.equals(ground_truth[i], 1e-4)) + i += 1 + + new_timestamps.clear() + new_values.clear() + new_factors.resize(0) + + time += delta_time + + def test_ordering(self): + """Test ordering""" + gfg = gtsam.GaussianFactorGraph() + + x0 = X(0) + x1 = X(1) + x2 = X(2) + + BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + + gfg.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) + gfg.add(x2, np.eye(1), x1, -np.eye(1), 2 * np.ones(1), BETWEEN_NOISE) + gfg.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE) + + keys = (x0, x1, x2) + ordering = gtsam.Ordering() + for key in keys[::-1]: + ordering.push_back(key) + + bn = gfg.eliminateSequential(ordering) + self.assertEqual(bn.size(), 3) + + keyVector = gtsam.KeyVector() + keyVector.append(keys[2]) + ordering = gtsam.Ordering.ColamdConstrainedLastGaussianFactorGraph( + gfg, keyVector) + bn = gfg.eliminateSequential(ordering) + self.assertEqual(bn.size(), 3) + + def test_find(self): + """ + Check that optimizing for Karcher mean (which minimizes Between distance) + gets correct result. + """ + R = Rot3.Expmap(np.array([0.1, 0, 0])) + + rotations = gtsam.Rot3Vector([R, R.inverse()]) + expected = Rot3() + actual = gtsam.FindKarcherMean(rotations) + self.gtsamAssertEquals(expected, actual) + + def test_find_karcher_mean_identity(self): + """Averaging 3 identity rotations should yield the identity.""" + a1Rb1 = Rot3() + a2Rb2 = Rot3() + a3Rb3 = Rot3() + + aRb_list = gtsam.Rot3Vector([a1Rb1, a2Rb2, a3Rb3]) + aRb_expected = Rot3() + + aRb = gtsam.FindKarcherMean(aRb_list) + self.gtsamAssertEquals(aRb, aRb_expected) + + def test_factor(self): + """Check that the InnerConstraint factor leaves the mean unchanged.""" + # Make a graph with two variables, one between, and one InnerConstraint + # The optimal result should satisfy the between, while moving the other + # variable to make the mean the same as before. + # Mean of R and R' is identity. Let's make a BetweenFactor making R21 = + # R*R*R, i.e. geodesic length is 3 rather than 2. + R = Rot3.Expmap(np.array([0.1, 0, 0])) + MODEL = gtsam.noiseModel.Unit.Create(3) + + graph = gtsam.NonlinearFactorGraph() + R12 = R.compose(R.compose(R)) + graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) + keys = gtsam.KeyVector() + keys.append(1) + keys.append(2) + graph.add(gtsam.KarcherMeanFactorRot3(keys)) + + initial = gtsam.Values() + initial.insert(1, R.inverse()) + initial.insert(2, R) + expected = Rot3() + + result = gtsam.GaussNewtonOptimizer(graph, initial).optimize() + actual = gtsam.FindKarcherMean( + gtsam.Rot3Vector([result.atRot3(1), + result.atRot3(2)])) + self.gtsamAssertEquals(expected, actual) + self.gtsamAssertEquals(R12, result.atRot3(1).between(result.atRot3(2))) + + def test_align(self) -> None: + """Ensure estimation of the Pose2 element to align two 2d point clouds succeeds. 
+ + Two point clouds represent horseshoe-shapes of the same size, just rotated and translated: + + | X---X + | | + | X---X + ------------------ + | + | + O | O + | | | + O---O + """ + pts_a = [ + Point2(1, -3), + Point2(1, -5), + Point2(-1, -5), + Point2(-1, -3), + ] + pts_b = [ + Point2(3, 1), + Point2(1, 1), + Point2(1, 3), + Point2(3, 3), + ] + + ab_pairs = Point2Pairs(list(zip(pts_a, pts_b))) + aTb = Pose2.Align(ab_pairs) + self.assertIsNotNone(aTb) + + for pt_a, pt_b in zip(pts_a, pts_b): + pt_a_ = aTb.transformFrom(pt_b) + np.testing.assert_allclose(pt_a, pt_a_) + + # Matrix version + A = np.array(pts_a).T + B = np.array(pts_b).T + aTb = Pose2.Align(A, B) + self.assertIsNotNone(aTb) + + for pt_a, pt_b in zip(pts_a, pts_b): + pt_a_ = aTb.transformFrom(pt_b) + np.testing.assert_allclose(pt_a, pt_a_) + + def test_align_squares(self): + """Test if Align method can align 2 squares.""" + square = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], + float).T + sTt = Pose3(Rot3.Rodrigues(0, 0, -np.pi), gtsam.Point3(2, 4, 0)) + transformed = sTt.transformTo(square) + + st_pairs = gtsam.Point3Pairs() + for j in range(4): + st_pairs.append((square[:, j], transformed[:, j])) + + # Recover the transformation sTt + estimated_sTt = Pose3.Align(st_pairs) + self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10) + + # Matrix version + estimated_sTt = Pose3.Align(square, transformed) + self.gtsamAssertEquals(estimated_sTt, sTt, 1e-10) + + def test_constructorBetweenFactorPose2s(self) -> None: + """Check if ShonanAveraging2 constructor works when not initialized from g2o file. + + GT pose graph: + + | cam 1 = (0,4) + --o + | . + . . + . . + | | + o-- ... o-- + cam 0 cam 2 = (4,0) + (0,0) + """ + num_images = 3 + + wTi_list = [ + Pose2(Rot2.fromDegrees(0), np.array([0, 0])), + Pose2(Rot2.fromDegrees(90), np.array([0, 4])), + Pose2(Rot2.fromDegrees(0), np.array([4, 0])), + ] + + edges = [(0, 1), (1, 2), (0, 2)] + i2Ri1_dict = {(i1, i2): + wTi_list[i2].inverse().compose(wTi_list[i1]).rotation() + for (i1, i2) in edges} + + lm_params = LevenbergMarquardtParams.CeresDefaults() + shonan_params = ShonanAveragingParameters2(lm_params) + shonan_params.setUseHuber(False) + shonan_params.setCertifyOptimality(True) + + noise_model = gtsam.noiseModel.Unit.Create(3) + between_factors = gtsam.BetweenFactorPose2s() + for (i1, i2), i2Ri1 in i2Ri1_dict.items(): + i2Ti1 = Pose2(i2Ri1, np.zeros(2)) + between_factors.append( + BetweenFactorPose2(i2, i1, i2Ti1, noise_model)) + + obj = ShonanAveraging2(between_factors, shonan_params) + initial = obj.initializeRandomly() + result_values, _ = obj.run(initial, min_p=2, max_p=100) + + wRi_list = [result_values.atRot2(i) for i in range(num_images)] + thetas_deg = np.array([wRi.degrees() for wRi in wRi_list]) + + # map all angles to [0,360) + thetas_deg = thetas_deg % 360 + thetas_deg -= thetas_deg[0] + + expected_thetas_deg = np.array([0.0, 90.0, 0.0]) + np.testing.assert_allclose(thetas_deg, expected_thetas_deg, atol=0.1) + + def test_align_poses2_along_straight_line(self) -> None: + """Test Align of list of Pose2Pair. + + Scenario: + 3 object poses + same scale (no gauge ambiguity) + world frame has poses rotated about 180 degrees. + world and egovehicle frame translated by 15 meters w.r.t. 
each other + """ + R180 = Rot2.fromDegrees(180) + + # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame) + # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame + eTo0 = Pose2(Rot2(), np.array([5, 0])) + eTo1 = Pose2(Rot2(), np.array([10, 0])) + eTo2 = Pose2(Rot2(), np.array([15, 0])) + + eToi_list = [eTo0, eTo1, eTo2] + + # Create destination poses + # (same three objects, but instead living in the world "w" frame) + wTo0 = Pose2(R180, np.array([-10, 0])) + wTo1 = Pose2(R180, np.array([-5, 0])) + wTo2 = Pose2(R180, np.array([0, 0])) + + wToi_list = [wTo0, wTo1, wTo2] + + we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) + + # Recover the transformation wSe (i.e. world_S_egovehicle) + wSe = Similarity2.Align(we_pairs) + + for wToi, eToi in zip(wToi_list, eToi_list): + self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) + + def test_align_poses2_along_straight_line_gauge(self): + """Test if Align Pose2Pairs method can account for gauge ambiguity. + + Scenario: + 3 object poses + with gauge ambiguity (2x scale) + world frame has poses rotated by 90 degrees. + world and egovehicle frame translated by 11 meters w.r.t. each other + """ + R90 = Rot2.fromDegrees(90) + + # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame) + # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame + eTo0 = Pose2(Rot2(), np.array([1, 0])) + eTo1 = Pose2(Rot2(), np.array([2, 0])) + eTo2 = Pose2(Rot2(), np.array([4, 0])) + + eToi_list = [eTo0, eTo1, eTo2] + + # Create destination poses + # (same three objects, but instead living in the world/city "w" frame) + wTo0 = Pose2(R90, np.array([0, 12])) + wTo1 = Pose2(R90, np.array([0, 14])) + wTo2 = Pose2(R90, np.array([0, 18])) + + wToi_list = [wTo0, wTo1, wTo2] + + we_pairs = Pose2Pairs(list(zip(wToi_list, eToi_list))) + + # Recover the transformation wSe (i.e. world_S_egovehicle) + wSe = Similarity2.Align(we_pairs) + + for wToi, eToi in zip(wToi_list, eToi_list): + self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) + + def test_align_poses2_scaled_squares(self): + """Test if Align Pose2Pairs method can account for gauge ambiguity. + + Make sure a big and small square can be aligned. + The u's represent a big square (10x10), and v's represents a small square (4x4). + + Scenario: + 4 object poses + with gauge ambiguity (2.5x scale) + """ + # 0, 90, 180, and 270 degrees yaw + R0 = Rot2.fromDegrees(0) + R90 = Rot2.fromDegrees(90) + R180 = Rot2.fromDegrees(180) + R270 = Rot2.fromDegrees(270) + + aTi0 = Pose2(R0, np.array([2, 3])) + aTi1 = Pose2(R90, np.array([12, 3])) + aTi2 = Pose2(R180, np.array([12, 13])) + aTi3 = Pose2(R270, np.array([2, 13])) + + aTi_list = [aTi0, aTi1, aTi2, aTi3] + + bTi0 = Pose2(R0, np.array([4, 3])) + bTi1 = Pose2(R90, np.array([8, 3])) + bTi2 = Pose2(R180, np.array([8, 7])) + bTi3 = Pose2(R270, np.array([4, 7])) + + bTi_list = [bTi0, bTi1, bTi2, bTi3] + + ab_pairs = Pose2Pairs(list(zip(aTi_list, bTi_list))) + + # Recover the transformation wSe (i.e. world_S_egovehicle) + aSb = Similarity2.Align(ab_pairs) + + for aTi, bTi in zip(aTi_list, bTi_list): + self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi)) + + def test_align_poses3_along_straight_line(self): + """Test Align Pose3Pairs method. + + Scenario: + 3 object poses + same scale (no gauge ambiguity) + world frame has poses rotated about x-axis (90 degree roll) + world and egovehicle frame translated by 15 meters w.r.t. 
each other + """ + Rx90 = Rot3.Rx(np.deg2rad(90)) + + # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame) + # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame + eTo0 = Pose3(Rot3(), np.array([5, 0, 0])) + eTo1 = Pose3(Rot3(), np.array([10, 0, 0])) + eTo2 = Pose3(Rot3(), np.array([15, 0, 0])) + + eToi_list = [eTo0, eTo1, eTo2] + + # Create destination poses + # (same three objects, but instead living in the world/city "w" frame) + wTo0 = Pose3(Rx90, np.array([-10, 0, 0])) + wTo1 = Pose3(Rx90, np.array([-5, 0, 0])) + wTo2 = Pose3(Rx90, np.array([0, 0, 0])) + + wToi_list = [wTo0, wTo1, wTo2] + + we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) + + # Recover the transformation wSe (i.e. world_S_egovehicle) + wSe = Similarity3.Align(we_pairs) + + for wToi, eToi in zip(wToi_list, eToi_list): + self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) + + def test_align_poses3_along_straight_line_gauge(self): + """Test if Align Pose3Pairs method can account for gauge ambiguity. + + Scenario: + 3 object poses + with gauge ambiguity (2x scale) + world frame has poses rotated about z-axis (90 degree yaw) + world and egovehicle frame translated by 11 meters w.r.t. each other + """ + Rz90 = Rot3.Rz(np.deg2rad(90)) + + # Create source poses (three objects o1, o2, o3 living in the egovehicle "e" frame) + # Suppose they are 3d cuboids detected by an onboard sensor in the egovehicle frame + eTo0 = Pose3(Rot3(), np.array([1, 0, 0])) + eTo1 = Pose3(Rot3(), np.array([2, 0, 0])) + eTo2 = Pose3(Rot3(), np.array([4, 0, 0])) + + eToi_list = [eTo0, eTo1, eTo2] + + # Create destination poses + # (same three objects, but instead living in the world/city "w" frame) + wTo0 = Pose3(Rz90, np.array([0, 12, 0])) + wTo1 = Pose3(Rz90, np.array([0, 14, 0])) + wTo2 = Pose3(Rz90, np.array([0, 18, 0])) + + wToi_list = [wTo0, wTo1, wTo2] + + we_pairs = gtsam.Pose3Pairs(list(zip(wToi_list, eToi_list))) + + # Recover the transformation wSe (i.e. world_S_egovehicle) + wSe = Similarity3.Align(we_pairs) + + for wToi, eToi in zip(wToi_list, eToi_list): + self.gtsamAssertEquals(wToi, wSe.transformFrom(eToi)) + + def test_align_poses3_scaled_squares(self): + """Test if Align Pose3Pairs method can account for gauge ambiguity. + + Make sure a big and small square can be aligned. + The u's represent a big square (10x10), and v's represents a small square (4x4). + + Scenario: + 4 object poses + with gauge ambiguity (2.5x scale) + """ + # 0, 90, 180, and 270 degrees yaw + R0 = Rot3.Rz(np.deg2rad(0)) + R90 = Rot3.Rz(np.deg2rad(90)) + R180 = Rot3.Rz(np.deg2rad(180)) + R270 = Rot3.Rz(np.deg2rad(270)) + + aTi0 = Pose3(R0, np.array([2, 3, 0])) + aTi1 = Pose3(R90, np.array([12, 3, 0])) + aTi2 = Pose3(R180, np.array([12, 13, 0])) + aTi3 = Pose3(R270, np.array([2, 13, 0])) + + aTi_list = [aTi0, aTi1, aTi2, aTi3] + + bTi0 = Pose3(R0, np.array([4, 3, 0])) + bTi1 = Pose3(R90, np.array([8, 3, 0])) + bTi2 = Pose3(R180, np.array([8, 7, 0])) + bTi3 = Pose3(R270, np.array([4, 7, 0])) + + bTi_list = [bTi0, bTi1, bTi2, bTi3] + + ab_pairs = gtsam.Pose3Pairs(list(zip(aTi_list, bTi_list))) + + # Recover the transformation wSe (i.e. 
world_S_egovehicle) + aSb = Similarity3.Align(ab_pairs) + + for aTi, bTi in zip(aTi_list, bTi_list): + self.gtsamAssertEquals(aTi, aSb.transformFrom(bTi)) + + def generate_measurements( + self, + calibration: Union[Cal3Bundler, Cal3_S2], + camera_model: Union[PinholeCameraCal3Bundler, PinholeCameraCal3_S2], + cal_params: Iterable[Iterable[Union[int, float]]], + camera_set: Optional[Union[CameraSetCal3Bundler, + CameraSetCal3_S2]] = None, + ) -> Tuple[List[Point2], Union[CameraSetCal3Bundler, CameraSetCal3_S2, + List[Cal3Bundler], List[Cal3_S2]]]: + """ + Generate vector of measurements for given calibration and camera model. + + Args: + calibration: Camera calibration e.g. Cal3_S2 + camera_model: Camera model e.g. PinholeCameraCal3_S2 + cal_params: Iterable of camera parameters for `calibration` e.g. [K1, K2] + camera_set: Cameraset object (for individual calibrations) + + Returns: + list of measurements and list/CameraSet object for cameras + """ + if camera_set is not None: + cameras = camera_set() + else: + cameras = [] + measurements = gtsam.Point2Vector() + + for k, pose in zip(cal_params, self.triangulation_poses): + K = calibration(*k) + camera = camera_model(pose, K) + cameras.append(camera) + z = camera.project(self.landmark) + measurements.append(z) + + return measurements, cameras + + def test_TriangulationExample(self) -> None: + """Tests triangulation with shared Cal3_S2 calibration""" + # Some common constants + sharedCal = (1500, 1200, 0, 640, 480) + + measurements, _ = self.generate_measurements( + calibration=Cal3_S2, + camera_model=PinholeCameraCal3_S2, + cal_params=(sharedCal, sharedCal)) + + triangulated_landmark = gtsam.triangulatePoint3( + self.triangulation_poses, + Cal3_S2(sharedCal), + measurements, + rank_tol=1e-9, + optimize=True) + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) + + # Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements_noisy = gtsam.Point2Vector() + measurements_noisy.append(measurements[0] - np.array([0.1, 0.5])) + measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3])) + + triangulated_landmark = gtsam.triangulatePoint3( + self.triangulation_poses, + Cal3_S2(sharedCal), + measurements_noisy, + rank_tol=1e-9, + optimize=True) + + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-2) + + def test_triangulation_robust_three_poses(self) -> None: + """Ensure triangulation with a robust model works.""" + sharedCal = Cal3_S2(1500, 1200, 0, 640, 480) + + # landmark ~5 meters infront of camera + landmark = Point3(5, 0.5, 1.2) + + pose1 = Pose3(UPRIGHT, Point3(0, 0, 1)) + pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0)) + pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -0.1)) + + camera1 = PinholeCameraCal3_S2(pose1, sharedCal) + camera2 = PinholeCameraCal3_S2(pose2, sharedCal) + camera3 = PinholeCameraCal3_S2(pose3, sharedCal) + + z1: Point2 = camera1.project(landmark) + z2: Point2 = camera2.project(landmark) + z3: Point2 = camera3.project(landmark) + + poses = gtsam.Pose3Vector([pose1, pose2, pose3]) + measurements = gtsam.Point2Vector([z1, z2, z3]) + + # noise free, so should give exactly the landmark + actual = gtsam.triangulatePoint3(poses, + sharedCal, + measurements, + rank_tol=1e-9, + optimize=False) + self.assertTrue(np.allclose(landmark, actual, atol=1e-2)) + + # Add outlier + measurements[0] += Point2(100, 120) # very large pixel noise! 
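        # The deliberately huge offset above mimics a gross outlier, e.g. a wrong
        # feature match. The solves below quantify how far the non-robust DLT and
        # nonlinear estimates drift, and how a Huber loss (which downweights rather
        # than rejects large residuals) pulls the estimate back to within ~5 cm.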
+ + # now estimate does not match landmark + actual2 = gtsam.triangulatePoint3(poses, + sharedCal, + measurements, + rank_tol=1e-9, + optimize=False) + # DLT is surprisingly robust, but still off (actual error is around 0.26m) + self.assertTrue(np.linalg.norm(landmark - actual2) >= 0.2) + self.assertTrue(np.linalg.norm(landmark - actual2) <= 0.5) + + # Again with nonlinear optimization + actual3 = gtsam.triangulatePoint3(poses, + sharedCal, + measurements, + rank_tol=1e-9, + optimize=True) + # result from nonlinear (but non-robust optimization) is close to DLT and still off + self.assertTrue(np.allclose(actual2, actual3, atol=0.1)) + + # Again with nonlinear optimization, this time with robust loss + model = gtsam.noiseModel.Robust.Create( + gtsam.noiseModel.mEstimator.Huber.Create(1.345), + gtsam.noiseModel.Unit.Create(2)) + actual4 = gtsam.triangulatePoint3(poses, + sharedCal, + measurements, + rank_tol=1e-9, + optimize=True, + model=model) + # using the Huber loss we now have a quite small error!! nice! + self.assertTrue(np.allclose(landmark, actual4, atol=0.05)) + + def test_outliers_and_far_landmarks(self) -> None: + """Check safe triangulation function.""" + pose1, pose2 = self.poses + + K1 = Cal3_S2(1500, 1200, 0, 640, 480) + # create first camera. Looking along X-axis, 1 meter above ground plane (x-y) + camera1 = PinholeCameraCal3_S2(pose1, K1) + + # create second camera 1 meter to the right of first camera + K2 = Cal3_S2(1600, 1300, 0, 650, 440) + camera2 = PinholeCameraCal3_S2(pose2, K2) + + # 1. Project two landmarks into two cameras and triangulate + z1 = camera1.project(self.landmark) + z2 = camera2.project(self.landmark) + + cameras = CameraSetCal3_S2() + cameras.append(camera1) + cameras.append(camera2) + + measurements = gtsam.Point2Vector() + measurements.append(z1) + measurements.append(z2) + + landmarkDistanceThreshold = 10 # landmark is closer than that + # all default except landmarkDistanceThreshold: + params = TriangulationParameters(1.0, False, landmarkDistanceThreshold) + actual: TriangulationResult = gtsam.triangulateSafe( + cameras, measurements, params) + self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2) + self.assertTrue(actual.valid()) + + landmarkDistanceThreshold = 4 # landmark is farther than that + params2 = TriangulationParameters(1.0, False, + landmarkDistanceThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params2) + self.assertTrue(actual.farPoint()) + + # 3. 
Add a slightly rotated third camera above with a wrong measurement + # (OUTLIER) + pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)) + K3 = Cal3_S2(700, 500, 0, 640, 480) + camera3 = PinholeCameraCal3_S2(pose3, K3) + z3 = camera3.project(self.landmark) + + cameras.append(camera3) + measurements.append(z3 + Point2(10, -10)) + + landmarkDistanceThreshold = 10 # landmark is closer than that + outlierThreshold = 100 # loose, the outlier is going to pass + params3 = TriangulationParameters(1.0, False, + landmarkDistanceThreshold, + outlierThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params3) + self.assertTrue(actual.valid()) + + # now set stricter threshold for outlier rejection + outlierThreshold = 5 # tighter, the outlier is not going to pass + params4 = TriangulationParameters(1.0, False, + landmarkDistanceThreshold, + outlierThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params4) + self.assertTrue(actual.outlier()) + + +if __name__ == "__main__": + unittest.main() From 42231879bfc9e6075d0f230216197ffda1e4a324 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 21 Jun 2023 17:47:40 -0400 Subject: [PATCH 69/78] remove ParameterMatrix in favor of gtsam::Matrix --- gtsam/basis/Basis.h | 28 ++- gtsam/basis/BasisFactors.h | 48 +++-- gtsam/basis/ParameterMatrix.h | 207 ---------------------- gtsam/basis/basis.i | 11 -- gtsam/basis/tests/testBasisFactors.cpp | 21 ++- gtsam/basis/tests/testChebyshev2.cpp | 33 ++-- gtsam/basis/tests/testFourier.cpp | 7 +- gtsam/basis/tests/testParameterMatrix.cpp | 145 --------------- gtsam/nonlinear/nonlinear.i | 1 - gtsam/nonlinear/values.i | 7 +- 10 files changed, 65 insertions(+), 443 deletions(-) delete mode 100644 gtsam/basis/ParameterMatrix.h delete mode 100644 gtsam/basis/tests/testParameterMatrix.cpp diff --git a/gtsam/basis/Basis.h b/gtsam/basis/Basis.h index ddc8f4ddd..41cdeeaaa 100644 --- a/gtsam/basis/Basis.h +++ b/gtsam/basis/Basis.h @@ -20,7 +20,6 @@ #include #include -#include #include @@ -160,7 +159,7 @@ class Basis { }; /** - * VectorEvaluationFunctor at a given x, applied to ParameterMatrix. + * VectorEvaluationFunctor at a given x, applied to a parameter Matrix. * This functor is used to evaluate a parameterized function at a given scalar * value x. When given a specific M*N parameters, returns an M-vector the M * corresponding functions at x, possibly with Jacobians wrpt the parameters. @@ -203,14 +202,14 @@ class Basis { } /// M-dimensional evaluation - Vector apply(const ParameterMatrix& P, + Vector apply(const Matrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - Vector operator()(const ParameterMatrix& P, + Vector operator()(const Matrix& P, OptionalJacobian H = {}) const { return apply(P, H); } @@ -264,21 +263,21 @@ class Basis { } /// Calculate component of component rowIndex_ of P - double apply(const ParameterMatrix& P, + double apply(const Matrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose(); } /// c++ sugar - double operator()(const ParameterMatrix& P, + double operator()(const Matrix& P, OptionalJacobian H = {}) const { return apply(P, H); } }; /** - * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix. + * Manifold EvaluationFunctor at a given x, applied to a parameter Matrix. * This functor is used to evaluate a parameterized function at a given scalar * value x. 
When given a specific M*N parameters, returns an M-vector the M * corresponding functions at x, possibly with Jacobians wrpt the parameters. @@ -307,8 +306,7 @@ class Basis { : Base(M, N, x, a, b) {} /// Manifold evaluation - T apply(const ParameterMatrix& P, - OptionalJacobian H = {}) const { + T apply(const Matrix& P, OptionalJacobian H = {}) const { // Interpolate the M-dimensional vector to yield a vector in tangent space Eigen::Matrix xi = Base::operator()(P, H); @@ -326,7 +324,7 @@ class Basis { } /// c++ sugar - T operator()(const ParameterMatrix& P, + T operator()(const Matrix& P, OptionalJacobian H = {}) const { return apply(P, H); // might call apply in derived } @@ -382,7 +380,7 @@ class Basis { }; /** - * VectorDerivativeFunctor at a given x, applied to ParameterMatrix. + * VectorDerivativeFunctor at a given x, applied to a parameter Matrix. * * This functor is used to evaluate the derivatives of a parameterized * function at a given scalar value x. When given a specific M*N parameters, @@ -426,13 +424,13 @@ class Basis { calculateJacobian(); } - Vector apply(const ParameterMatrix& P, + Vector apply(const Matrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.matrix() * this->weights_.transpose(); } /// c++ sugar - Vector operator()(const ParameterMatrix& P, + Vector operator()(const Matrix& P, OptionalJacobian H = {}) const { return apply(P, H); } @@ -485,13 +483,13 @@ class Basis { calculateJacobian(N); } /// Calculate derivative of component rowIndex_ of F - double apply(const ParameterMatrix& P, + double apply(const Matrix& P, OptionalJacobian H = {}) const { if (H) *H = H_; return P.row(rowIndex_) * this->weights_.transpose(); } /// c++ sugar - double operator()(const ParameterMatrix& P, + double operator()(const Matrix& P, OptionalJacobian H = {}) const { return apply(P, H); } diff --git a/gtsam/basis/BasisFactors.h b/gtsam/basis/BasisFactors.h index 2ce0faae7..ad92ad672 100644 --- a/gtsam/basis/BasisFactors.h +++ b/gtsam/basis/BasisFactors.h @@ -75,7 +75,7 @@ class EvaluationFactor : public FunctorizedFactor { }; /** - * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix + * Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix * of size (M, N) is equal to a vector-valued measurement at the same point, when * using a pseudo-spectral parameterization. @@ -91,10 +91,9 @@ class EvaluationFactor : public FunctorizedFactor { * @ingroup basis */ template -class VectorEvaluationFactor - : public FunctorizedFactor { +class VectorEvaluationFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor; + using Base = FunctorizedFactor; public: VectorEvaluationFactor() {} @@ -102,7 +101,7 @@ class VectorEvaluationFactor /** * @brief Construct a new VectorEvaluationFactor object. * - * @param key The key to the ParameterMatrix object used to represent the + * @param key The key to the parameter Matrix object used to represent the * polynomial. * @param z The measurement value. * @param model The noise model. @@ -118,7 +117,7 @@ class VectorEvaluationFactor /** * @brief Construct a new VectorEvaluationFactor object. * - * @param key The key to the ParameterMatrix object used to represent the + * @param key The key to the parameter Matrix object used to represent the * polynomial. * @param z The measurement value. * @param model The noise model. 
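// A minimal keying sketch (illustrative; the key name and the sizes M, N mirror
// the updated tests in gtsam/basis/tests/testBasisFactors.cpp): with
// ParameterMatrix removed, the factor's unknown is a plain M x N gtsam::Matrix
// stored directly in Values, e.g.
//   gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N);
//   gtsam::Values initial;
//   initial.insert<gtsam::Matrix>(key, stateMatrix);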
@@ -138,7 +137,7 @@ class VectorEvaluationFactor }; /** - * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix + * Unary factor for enforcing BASIS polynomial evaluation on a parameter Matrix * of size (P, N) is equal to specified measurement at the same point, when * using a pseudo-spectral parameterization. * @@ -156,10 +155,9 @@ class VectorEvaluationFactor * @ingroup basis */ template -class VectorComponentFactor - : public FunctorizedFactor { +class VectorComponentFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor; + using Base = FunctorizedFactor; public: VectorComponentFactor() {} @@ -167,7 +165,7 @@ class VectorComponentFactor /** * @brief Construct a new VectorComponentFactor object. * - * @param key The key to the ParameterMatrix object used to represent the + * @param key The key to the parameter Matrix object used to represent the * polynomial. * @param z The scalar value at a specified index `i` of the full measurement * vector. @@ -186,7 +184,7 @@ class VectorComponentFactor /** * @brief Construct a new VectorComponentFactor object. * - * @param key The key to the ParameterMatrix object used to represent the + * @param key The key to the parameter Matrix object used to represent the * polynomial. * @param z The scalar value at a specified index `i` of the full measurement * vector. @@ -226,9 +224,9 @@ class VectorComponentFactor * where `x` is the value (e.g. timestep) at which the rotation was evaluated. */ template -class ManifoldEvaluationFactor : public FunctorizedFactor { +class ManifoldEvaluationFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor; + using Base = FunctorizedFactor; public: ManifoldEvaluationFactor() {} @@ -288,7 +286,7 @@ class DerivativeFactor /** * @brief Construct a new DerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The measurement value. * @param model The noise model. @@ -302,7 +300,7 @@ class DerivativeFactor /** * @brief Construct a new DerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The measurement value. * @param model The noise model. @@ -325,10 +323,9 @@ class DerivativeFactor * @param BASIS: The basis class to use e.g. Chebyshev2 */ template -class VectorDerivativeFactor - : public FunctorizedFactor { +class VectorDerivativeFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor; + using Base = FunctorizedFactor; using Func = typename BASIS::VectorDerivativeFunctor; public: @@ -337,7 +334,7 @@ class VectorDerivativeFactor /** * @brief Construct a new VectorDerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The measurement value. * @param model The noise model. @@ -353,7 +350,7 @@ class VectorDerivativeFactor /** * @brief Construct a new VectorDerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The measurement value. * @param model The noise model. @@ -379,10 +376,9 @@ class VectorDerivativeFactor * @param BASIS: The basis class to use e.g. 
Chebyshev2 */ template -class ComponentDerivativeFactor - : public FunctorizedFactor { +class ComponentDerivativeFactor : public FunctorizedFactor { private: - using Base = FunctorizedFactor; + using Base = FunctorizedFactor; using Func = typename BASIS::ComponentDerivativeFunctor; public: @@ -391,7 +387,7 @@ class ComponentDerivativeFactor /** * @brief Construct a new ComponentDerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The scalar measurement value at a specific index `i` of the full * measurement vector. @@ -410,7 +406,7 @@ class ComponentDerivativeFactor /** * @brief Construct a new ComponentDerivativeFactor object. * - * @param key The key to the ParameterMatrix which represents the basis + * @param key The key to the parameter Matrix which represents the basis * polynomial. * @param z The scalar measurement value at a specific index `i` of the full * measurement vector. diff --git a/gtsam/basis/ParameterMatrix.h b/gtsam/basis/ParameterMatrix.h deleted file mode 100644 index bfb26c4de..000000000 --- a/gtsam/basis/ParameterMatrix.h +++ /dev/null @@ -1,207 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file ParameterMatrix.h - * @brief Define ParameterMatrix class which is used to store values at - * interpolation points. - * @author Varun Agrawal, Frank Dellaert - * @date September 21, 2020 - */ - -#pragma once - -#include -#include -#include - -#include - -namespace gtsam { - -/** - * A matrix abstraction of MxN values at the Basis points. - * This class serves as a wrapper over an Eigen matrix. - * @param N: the number of Basis points (e.g. Chebyshev points of the second - * kind). - */ -class ParameterMatrix { - private: - Matrix matrix_; - - public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - - enum { dimension = Eigen::Dynamic }; - - /** - * Create ParameterMatrix using the number of basis points. - * @param M: The dimension size of the type you wish to evaluate. - * @param N: The number of basis points (the columns). - */ - ParameterMatrix(const size_t M, const size_t N) : matrix_(M, N) { - matrix_.setZero(); - } - - /** - * Create ParameterMatrix from an MxN Eigen Matrix. - * @param matrix: An Eigen matrix used to initialze the ParameterMatrix. - */ - ParameterMatrix(const Matrix& matrix) : matrix_(matrix) {} - - /// Get the number of rows. - size_t rows() const { return matrix_.rows(); } - - /// Get the number of columns. - size_t cols() const { return matrix_.cols(); } - - /// Get the underlying matrix. - Matrix matrix() const { return matrix_; } - - /// Return the tranpose of the underlying matrix. - Matrix transpose() const { return matrix_.transpose(); } - - /** - * Get the matrix row specified by `index`. - * @param index: The row index to retrieve. - */ - Eigen::Matrix row(size_t index) const { - return matrix_.row(index); - } - - /** - * Set the matrix row specified by `index`. - * @param index: The row index to set. - */ - auto row(size_t index) -> Eigen::Block { - return matrix_.row(index); - } - - /** - * Get the matrix column specified by `index`. 
- * @param index: The column index to retrieve. - */ - Eigen::Matrix col(size_t index) const { - return matrix_.col(index); - } - - /** - * Set the matrix column specified by `index`. - * @param index: The column index to set. - */ - auto col(size_t index) -> Eigen::Block { - return matrix_.col(index); - } - - /** - * Set all matrix coefficients to zero. - */ - void setZero() { matrix_.setZero(); } - - /** - * Add a ParameterMatrix to another. - * @param other: ParameterMatrix to add. - */ - ParameterMatrix operator+(const ParameterMatrix& other) const { - return ParameterMatrix(matrix_ + other.matrix()); - } - - /** - * Add a MxN-sized vector to the ParameterMatrix. - * @param other: Vector which is reshaped and added. - */ - ParameterMatrix operator+(const Eigen::Matrix& other) const { - // This form avoids a deep copy and instead typecasts `other`. - Eigen::Map other_(other.data(), rows(), cols()); - return ParameterMatrix(matrix_ + other_); - } - - /** - * Subtract a ParameterMatrix from another. - * @param other: ParameterMatrix to subtract. - */ - ParameterMatrix operator-(const ParameterMatrix& other) const { - return ParameterMatrix(matrix_ - other.matrix()); - } - - /** - * Subtract a MxN-sized vector from the ParameterMatrix. - * @param other: Vector which is reshaped and subracted. - */ - ParameterMatrix operator-(const Eigen::Matrix& other) const { - Eigen::Map other_(other.data(), rows(), cols()); - return ParameterMatrix(matrix_ - other_); - } - - /** - * Multiply ParameterMatrix with an Eigen matrix. - * @param other: Eigen matrix which should be multiplication compatible with - * the ParameterMatrix. - */ - Matrix operator*(const Matrix& other) const { return matrix_ * other; } - - /// @name Vector Space requirements - /// @{ - - /** - * Print the ParameterMatrix. - * @param s: The prepend string to add more contextual info. - */ - void print(const std::string& s = "") const { - std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl; - } - - /** - * Check for equality up to absolute tolerance. - * @param other: The ParameterMatrix to check equality with. - * @param tol: The absolute tolerance threshold. - */ - bool equals(const ParameterMatrix& other, double tol = 1e-8) const { - return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol); - } - - /// Returns dimensionality of the tangent space - inline size_t dim() const { return matrix_.size(); } - - /// Convert to vector form, is done row-wise - inline Vector vector() const { - using RowMajor = Eigen::Matrix; - Vector result(matrix_.size()); - Eigen::Map(&result(0), rows(), cols()) = matrix_; - return result; - } - - /** Identity function to satisfy VectorSpace traits. - * - * NOTE: The size at compile time is unknown so this identity is zero - * length and thus not valid. - */ - inline static ParameterMatrix Identity(size_t M = 0, size_t N = 0) { - return ParameterMatrix(M, N); - } - - /// @} -}; - -/// traits for ParameterMatrix -template <> -struct traits : public internal::VectorSpace { -}; - -/* ************************************************************************* */ -// Stream operator that takes a ParameterMatrix. Used for printing. 
-inline std::ostream& operator<<(std::ostream& os, - const ParameterMatrix& parameterMatrix) { - os << parameterMatrix.matrix(); - return os; -} - -} // namespace gtsam diff --git a/gtsam/basis/basis.i b/gtsam/basis/basis.i index 8cbe4593d..f8cea70f8 100644 --- a/gtsam/basis/basis.i +++ b/gtsam/basis/basis.i @@ -46,17 +46,6 @@ class Chebyshev2 { static Matrix DifferentiationMatrix(size_t N, double a, double b); }; -#include - -class ParameterMatrix { - ParameterMatrix(const size_t M, const size_t N); - ParameterMatrix(const Matrix& matrix); - - Matrix matrix() const; - - void print(const string& s = "") const; -}; - #include template (key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -133,10 +132,10 @@ TEST(BasisFactors, VectorComponentFactor) { NonlinearFactorGraph graph; graph.add(factor); - ParameterMatrix stateMatrix(P, N); + gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(P, N); Values initial; - initial.insert(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -158,10 +157,10 @@ TEST(BasisFactors, ManifoldEvaluationFactor) { NonlinearFactorGraph graph; graph.add(factor); - ParameterMatrix stateMatrix(3, N); + gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(3, N); Values initial; - initial.insert(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -185,10 +184,10 @@ TEST(BasisFactors, VecDerivativePrior) { NonlinearFactorGraph graph; graph.add(vecDPrior); - ParameterMatrix stateMatrix(M, N); + gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N); Values initial; - initial.insert(key, stateMatrix); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); @@ -212,8 +211,8 @@ TEST(BasisFactors, ComponentDerivativeFactor) { graph.add(controlDPrior); Values initial; - ParameterMatrix stateMatrix(M, N); - initial.insert(key, stateMatrix); + gtsam::Matrix stateMatrix = gtsam::Matrix::Zero(M, N); + initial.insert(key, stateMatrix); LevenbergMarquardtParams parameters; parameters.setMaxIterations(20); diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index 8aa326ab1..780714936 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -108,7 +108,7 @@ TEST(Chebyshev2, InterpolateVector) { double t = 30, a = 0, b = 100; const size_t N = 3; // Create 2x3 matrix with Vectors at Chebyshev points - ParameterMatrix X(2, N); + Matrix X = Matrix::Zero(2, N); X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp // Check value @@ -120,11 +120,11 @@ TEST(Chebyshev2, InterpolateVector) { EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative - std::function f = + std::function f = std::bind(&Chebyshev2::VectorEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); } @@ -133,7 +133,7 @@ TEST(Chebyshev2, InterpolateVector) { TEST(Chebyshev2, InterpolatePose2) { double t = 30, a = 0, b = 100; - ParameterMatrix X(3, N); + Matrix X(3, N); X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp X.row(1) = Vector::Zero(N); X.row(2) = 0.1 * Vector::Ones(N); @@ -148,11 +148,11 @@ TEST(Chebyshev2, InterpolatePose2) { EXPECT(assert_equal(expected, fx(X, actualH))); // Check derivative - std::function f = 
+ std::function f = std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-9)); } @@ -169,7 +169,7 @@ TEST(Chebyshev2, InterpolatePose3) { Vector6 xi = Pose3::ChartAtOrigin::Local(pose); Eigen::Matrix actualH(6, 6 * N); - ParameterMatrix X(6, N); + Matrix X = Matrix::Zero(6, N); X.col(11) = xi; Chebyshev2::ManifoldEvaluationFunctor fx(N, t, a, b); @@ -178,11 +178,11 @@ TEST(Chebyshev2, InterpolatePose3) { EXPECT(assert_equal(expected, fx(X, actualH))); // Check derivative - std::function f = + std::function f = std::bind(&Chebyshev2::ManifoldEvaluationFunctor::operator(), fx, std::placeholders::_1, nullptr); Matrix numericalH = - numericalDerivative11(f, X); + numericalDerivative11(f, X); EXPECT(assert_equal(numericalH, actualH, 1e-8)); } #endif @@ -415,12 +415,12 @@ TEST(Chebyshev2, VectorDerivativeFunctor) { const double x = 0.2; using VecD = Chebyshev2::VectorDerivativeFunctor; VecD fx(M, N, x, 0, 3); - ParameterMatrix X(M, N); + Matrix X = Matrix::Zero(M, N); Matrix actualH(M, M * N); EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8)); // Test Jacobian - Matrix expectedH = numericalDerivative11( + Matrix expectedH = numericalDerivative11( std::bind(&VecD::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } @@ -434,11 +434,10 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { const Vector points = Chebyshev2::Points(N, 0, T); // Assign the parameter matrix 1xN - Matrix values(1, N); + Matrix X(1, N); for (size_t i = 0; i < N; ++i) { - values(i) = f(points(i)); + X(i) = f(points(i)); } - ParameterMatrix X(values); // Evaluate the derivative at the chebyshev points using // VectorDerivativeFunctor. 
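// In both derivative-functor tests the analytic Jacobian is allocated as
// Matrix actualH(M, M * N) -- one column per entry of the M x N parameter
// matrix -- and is cross-checked against numericalDerivative11 taken with
// respect to that same Matrix argument.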
@@ -452,7 +451,7 @@ TEST(Chebyshev2, VectorDerivativeFunctor2) { Matrix actualH(M, M * N); VecD vecd(M, N, points(0), 0, T); vecd(X, actualH); - Matrix expectedH = numericalDerivative11( + Matrix expectedH = numericalDerivative11( std::bind(&VecD::operator(), vecd, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-6)); } @@ -465,11 +464,11 @@ TEST(Chebyshev2, ComponentDerivativeFunctor) { using CompFunc = Chebyshev2::ComponentDerivativeFunctor; size_t row = 1; CompFunc fx(M, N, row, x, 0, 3); - ParameterMatrix X(M, N); + Matrix X = Matrix::Zero(M, N); Matrix actualH(1, M * N); EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8); - Matrix expectedH = numericalDerivative11( + Matrix expectedH = numericalDerivative11( std::bind(&CompFunc::operator(), fx, std::placeholders::_1, nullptr), X); EXPECT(assert_equal(expectedH, actualH, 1e-7)); } diff --git a/gtsam/basis/tests/testFourier.cpp b/gtsam/basis/tests/testFourier.cpp index 0060dc317..2995e8c45 100644 --- a/gtsam/basis/tests/testFourier.cpp +++ b/gtsam/basis/tests/testFourier.cpp @@ -187,10 +187,9 @@ TEST(Basis, VecDerivativeFunctor) { double h = 2 * M_PI / 16; Vector2 dotShape(0.5556, -0.8315); // at h/2 DotShape dotShapeFunction(2, N, h / 2); - Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071) - .finished() - .transpose(); - ParameterMatrix theta(theta_mat); + Matrix theta = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071) + .finished() + .transpose(); EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4)); } diff --git a/gtsam/basis/tests/testParameterMatrix.cpp b/gtsam/basis/tests/testParameterMatrix.cpp deleted file mode 100644 index 11a098172..000000000 --- a/gtsam/basis/tests/testParameterMatrix.cpp +++ /dev/null @@ -1,145 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file testParameterMatrix.cpp - * @date Sep 22, 2020 - * @author Varun Agrawal, Frank Dellaert - * @brief Unit tests for ParameterMatrix.h - */ - -#include -#include -#include -#include -#include -#include - -using namespace std; -using namespace gtsam; - -using Parameters = Chebyshev2::Parameters; - -const size_t M = 2, N = 5; - -//****************************************************************************** -TEST(ParameterMatrix, Constructor) { - ParameterMatrix actual1(2, 3); - ParameterMatrix expected1(Matrix::Zero(2, 3)); - EXPECT(assert_equal(expected1, actual1)); - - ParameterMatrix actual2(Matrix::Ones(2, 3)); - ParameterMatrix expected2(Matrix::Ones(2, 3)); - EXPECT(assert_equal(expected2, actual2)); - EXPECT(assert_equal(expected2.matrix(), actual2.matrix())); -} - -//****************************************************************************** -TEST(ParameterMatrix, Dimensions) { - ParameterMatrix params(M, N); - EXPECT_LONGS_EQUAL(params.rows(), M); - EXPECT_LONGS_EQUAL(params.cols(), N); - EXPECT_LONGS_EQUAL(params.dim(), M * N); -} - -//****************************************************************************** -TEST(ParameterMatrix, Getters) { - ParameterMatrix params(M, N); - - Matrix expectedMatrix = Matrix::Zero(2, 5); - EXPECT(assert_equal(expectedMatrix, params.matrix())); - - Matrix expectedMatrixTranspose = Matrix::Zero(5, 2); - EXPECT(assert_equal(expectedMatrixTranspose, params.transpose())); - - ParameterMatrix p2(Matrix::Ones(M, N)); - Vector expectedRowVector = Vector::Ones(N); - for (size_t r = 0; r < M; ++r) { - EXPECT(assert_equal(p2.row(r), expectedRowVector)); - } - - ParameterMatrix p3(2 * Matrix::Ones(M, N)); - Vector expectedColVector = 2 * Vector::Ones(M); - for (size_t c = 0; c < M; ++c) { - EXPECT(assert_equal(p3.col(c), expectedColVector)); - } -} - -//****************************************************************************** -TEST(ParameterMatrix, Setters) { - ParameterMatrix params(Matrix::Zero(M, N)); - Matrix expected = Matrix::Zero(M, N); - - // row - params.row(0) = Vector::Ones(N); - expected.row(0) = Vector::Ones(N); - EXPECT(assert_equal(expected, params.matrix())); - - // col - params.col(2) = Vector::Ones(M); - expected.col(2) = Vector::Ones(M); - - EXPECT(assert_equal(expected, params.matrix())); - - // setZero - params.setZero(); - expected.setZero(); - EXPECT(assert_equal(expected, params.matrix())); -} - -//****************************************************************************** -TEST(ParameterMatrix, Addition) { - ParameterMatrix params(Matrix::Ones(M, N)); - ParameterMatrix expected(2 * Matrix::Ones(M, N)); - - // Add vector - EXPECT(assert_equal(expected, params + Vector::Ones(M * N))); - // Add another ParameterMatrix - ParameterMatrix actual = params + ParameterMatrix(Matrix::Ones(M, N)); - EXPECT(assert_equal(expected, actual)); -} - -//****************************************************************************** -TEST(ParameterMatrix, Subtraction) { - ParameterMatrix params(2 * Matrix::Ones(M, N)); - ParameterMatrix expected(Matrix::Ones(M, N)); - - // Subtract vector - EXPECT(assert_equal(expected, params - Vector::Ones(M * N))); - // Subtract another ParameterMatrix - ParameterMatrix actual = params - ParameterMatrix(Matrix::Ones(M, N)); - EXPECT(assert_equal(expected, actual)); -} - 
-//****************************************************************************** -TEST(ParameterMatrix, Multiplication) { - ParameterMatrix params(Matrix::Ones(M, N)); - Matrix multiplier = 2 * Matrix::Ones(N, 2); - Matrix expected = 2 * N * Matrix::Ones(M, 2); - EXPECT(assert_equal(expected, params * multiplier)); -} - -//****************************************************************************** -TEST(ParameterMatrix, VectorSpace) { - ParameterMatrix params(Matrix::Ones(M, N)); - // vector - EXPECT(assert_equal(Vector::Ones(M * N), params.vector())); - // identity - EXPECT(assert_equal(ParameterMatrix::Identity(M), - ParameterMatrix(Matrix::Zero(M, 0)))); -} - -//****************************************************************************** -int main() { - TestResult tr; - return TestRegistry::runAllTests(tr); -} -//****************************************************************************** diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 19f4ae588..9b1fa58cd 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -25,7 +25,6 @@ namespace gtsam { #include #include #include -#include #include class GraphvizFormatting : gtsam::DotWriter { diff --git a/gtsam/nonlinear/values.i b/gtsam/nonlinear/values.i index 7ad6c947a..e36b1cf1c 100644 --- a/gtsam/nonlinear/values.i +++ b/gtsam/nonlinear/values.i @@ -25,7 +25,6 @@ namespace gtsam { #include #include #include -#include #include @@ -96,7 +95,6 @@ class Values { void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); void insert(size_t j, double c); - void insert(size_t j, const gtsam::ParameterMatrix& X); template void insert(size_t j, const T& val); @@ -130,7 +128,6 @@ class Values { void update(size_t j, Vector vector); void update(size_t j, Matrix matrix); void update(size_t j, double c); - void update(size_t j, const gtsam::ParameterMatrix& X); void insert_or_assign(size_t j, const gtsam::Point2& point2); void insert_or_assign(size_t j, const gtsam::Point3& point3); @@ -171,7 +168,6 @@ class Values { void insert_or_assign(size_t j, Vector vector); void insert_or_assign(size_t j, Matrix matrix); void insert_or_assign(size_t j, double c); - void insert_or_assign(size_t j, const gtsam::ParameterMatrix& X); template + double}> T at(size_t j); }; From 475184cb3ccd7a3109da42fabd33306ed01cea12 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Thu, 22 Jun 2023 09:46:21 -0500 Subject: [PATCH 70/78] Removed boost guards from testSerializationSlam and fixed CMakeLists --- tests/CMakeLists.txt | 2 +- tests/testSerializationSlam.cpp | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d7b68c4ec..44b7505fc 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -13,7 +13,7 @@ if (NOT GTSAM_USE_BOOST_FEATURES) endif() if (NOT GTSAM_ENABLE_BOOST_SERIALIZATION) - list(APPEND excluded_tests "testSerializationSLAM.cpp") + list(APPEND excluded_tests "testSerializationSlam.cpp") endif() # Build tests diff --git a/tests/testSerializationSlam.cpp b/tests/testSerializationSlam.cpp index 6b504a810..c4170b108 100644 --- a/tests/testSerializationSlam.cpp +++ b/tests/testSerializationSlam.cpp @@ -52,8 +52,6 @@ #include #include -#ifdef GTSAM_USE_BOOST_FEATURES - #include #include @@ -676,8 +674,6 @@ TEST(SubgraphSolver, Solves) { } } -#endif - /* ************************************************************************* */ int main() { TestResult tr; return 
TestRegistry::runAllTests(tr); } /* ************************************************************************* */ From 1de26020b3c50e6e26d64c821f00230d140f1656 Mon Sep 17 00:00:00 2001 From: IshitaTakeshi Date: Wed, 28 Jun 2023 20:21:55 +0900 Subject: [PATCH 71/78] Add explanation of the StereoPoint constructor --- gtsam/geometry/StereoPoint2.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gtsam/geometry/StereoPoint2.h b/gtsam/geometry/StereoPoint2.h index bd335dfbb..4383e212e 100644 --- a/gtsam/geometry/StereoPoint2.h +++ b/gtsam/geometry/StereoPoint2.h @@ -46,7 +46,9 @@ public: uL_(0), uR_(0), v_(0) { } - /** constructor */ + /** uL and uR represent the x-axis value of left and right frame coordinates respectively. + v represents the y coordinate value. The y-axis value should be the same under the + stereo constraint. */ StereoPoint2(double uL, double uR, double v) : uL_(uL), uR_(uR), v_(v) { } From c95bcae93a6353fd241b55d6c885c1e552bce148 Mon Sep 17 00:00:00 2001 From: Tal Regev Date: Sat, 1 Jul 2023 01:40:55 +0300 Subject: [PATCH 72/78] Add non concurrency to all workflows --- .github/workflows/build-linux.yml | 6 ++++++ .github/workflows/build-macos.yml | 6 ++++++ .github/workflows/build-python.yml | 6 ++++++ .github/workflows/build-special.yml | 6 ++++++ .github/workflows/build-windows.yml | 6 ++++++ 5 files changed, 30 insertions(+) diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 129b5aaaf..e4937ce06 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -2,6 +2,12 @@ name: Linux CI on: [pull_request] +# Every time you make a push to your PR, it cancel immediately the previous checks, +# and start a new one. The other runner will be available more quickly to your PR. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: build: name: ${{ matrix.name }} ${{ matrix.build_type }} diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 3fa3c15dd..541701836 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -2,6 +2,12 @@ name: macOS CI on: [pull_request] +# Every time you make a push to your PR, it cancel immediately the previous checks, +# and start a new one. The other runner will be available more quickly to your PR. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: build: name: ${{ matrix.name }} ${{ matrix.build_type }} diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index f09d589dc..ca4645a77 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -2,6 +2,12 @@ name: Python CI on: [pull_request] +# Every time you make a push to your PR, it cancel immediately the previous checks, +# and start a new one. The other runner will be available more quickly to your PR. 
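# (The group key below expands to one concurrency group per workflow and PR head
#  branch; github.run_id is only used as a fallback for runs without a head_ref.)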
+concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: build: name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index f72dadbae..72466ffd6 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -2,6 +2,12 @@ name: Special Cases CI on: [pull_request] +# Every time you make a push to your PR, it cancel immediately the previous checks, +# and start a new one. The other runner will be available more quickly to your PR. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: build: name: ${{ matrix.name }} ${{ matrix.build_type }} diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml index 0434577c1..cbe0c10f1 100644 --- a/.github/workflows/build-windows.yml +++ b/.github/workflows/build-windows.yml @@ -2,6 +2,12 @@ name: Windows CI on: [pull_request] +# Every time you make a push to your PR, it cancel immediately the previous checks, +# and start a new one. The other runner will be available more quickly to your PR. +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: build: name: ${{ matrix.name }} ${{ matrix.build_type }} From d87db0a2f139384921ecea435d24bbdd308bedf2 Mon Sep 17 00:00:00 2001 From: Fan Jiang Date: Sat, 1 Jul 2023 13:57:11 -0700 Subject: [PATCH 73/78] Fix the CI under normal circumstances Signed-off-by: Fan Jiang --- cmake/HandleGeneralOptions.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake index eab40d16b..4474aa13f 100644 --- a/cmake/HandleGeneralOptions.cmake +++ b/cmake/HandleGeneralOptions.cmake @@ -30,7 +30,7 @@ option(GTSAM_ALLOW_DEPRECATED_SINCE_V43 "Allow use of methods/functions depr option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF) -option(GTSAM_SLOW_BUT_CORRECT_EXPMAP "Use slower but correct expmap" ON) +option(GTSAM_SLOW_BUT_CORRECT_EXPMAP "Use slower but correct expmap for Pose2" OFF) if (GTSAM_FORCE_SHARED_LIB) message(STATUS "GTSAM is a shared library due to GTSAM_FORCE_SHARED_LIB") From d3bcfddfb5269cff43e5fcbc5ad988e8e3c8758c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 30 Jan 2020 15:17:04 -0500 Subject: [PATCH 74/78] ISAM2: wrap full update method and error method --- gtsam/nonlinear/nonlinear.i | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 6d101af98..a24bcbb75 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -507,18 +507,18 @@ class ISAM2 { gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, - gtsam::KeyGroupMap& constrainedKeys, + const gtsam::KeyGroupMap& constrainedKeys, const gtsam::KeyList& noRelinKeys); gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, - gtsam::KeyGroupMap& constrainedKeys, + const gtsam::KeyGroupMap& 
constrainedKeys, const gtsam::KeyList& noRelinKeys, const gtsam::KeyList& extraReelimKeys); gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, - gtsam::KeyGroupMap& constrainedKeys, + const gtsam::KeyGroupMap& constrainedKeys, const gtsam::KeyList& noRelinKeys, const gtsam::KeyList& extraReelimKeys, bool force_relinearize); @@ -527,6 +527,8 @@ class ISAM2 { const gtsam::Values& newTheta, const gtsam::ISAM2UpdateParams& updateParams); + double error(const gtsam::VectorValues& values) const; + gtsam::Values getLinearizationPoint() const; bool valueExists(gtsam::Key key) const; gtsam::Values calculateEstimate() const; From 6992f0d64cafbedea3ec12580bf0c9094ffa55f4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 3 Feb 2020 16:50:32 -0500 Subject: [PATCH 75/78] added test for full ISAM2 update method --- python/gtsam/tests/test_VisualISAMExample.py | 52 +++++++++++++++++++- python/gtsam/utils/visual_isam.py | 4 +- 2 files changed, 52 insertions(+), 4 deletions(-) diff --git a/python/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py index ebc77e2e3..a25019213 100644 --- a/python/gtsam/tests/test_VisualISAMExample.py +++ b/python/gtsam/tests/test_VisualISAMExample.py @@ -6,15 +6,18 @@ All Rights Reserved See LICENSE for the license information visual_isam unit tests. -Author: Frank Dellaert & Duy Nguyen Ta (Python) +Author: Frank Dellaert & Duy Nguyen Ta & Varun Agrawal (Python) """ +# pylint: disable=maybe-no-member,invalid-name + import unittest import gtsam.utils.visual_data_generator as generator import gtsam.utils.visual_isam as visual_isam -from gtsam import symbol from gtsam.utils.test_case import GtsamTestCase +from gtsam import symbol + class TestVisualISAMExample(GtsamTestCase): """Test class for ISAM2 with visual landmarks.""" @@ -53,6 +56,51 @@ class TestVisualISAMExample(GtsamTestCase): point_j = result.atPoint3(symbol('l', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) + def test_isam2_update(self): + """ + Test for full version of ISAM2::update method + """ + # Data Options + options = generator.Options() + options.triangle = False + options.nrCameras = 20 + + # iSAM Options + isamOptions = visual_isam.Options() + isamOptions.hardConstraint = False + isamOptions.pointPriors = False + isamOptions.batchInitialization = True + isamOptions.reorderInterval = 10 + isamOptions.alwaysRelinearize = False + + # Generate data + data, truth = generator.generate_data(options) + + # Initialize iSAM with the first pose and points + isam, result, nextPose = visual_isam.initialize(data, truth, isamOptions) + + remove_factor_indices = gtsam.FactorIndices() + constrained_keys = gtsam.KeyGroupMap() + no_relin_keys = gtsam.KeyList() + extra_reelim_keys = gtsam.KeyList() + isamArgs = (remove_factor_indices, + constrained_keys, + no_relin_keys, + extra_reelim_keys, + False) + + # Main loop for iSAM: stepping through all poses + for currentPose in range(nextPose, options.nrCameras): + isam, result = visual_isam.step(data, isam, result, truth, currentPose, isamArgs) + + for i in range(len(truth.cameras)): + pose_i = result.atPose3(symbol(ord('x'), i)) + self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) + + for j in range(len(truth.points)): + point_j = result.atPoint3(symbol(ord('l'), j)) + self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/utils/visual_isam.py 
b/python/gtsam/utils/visual_isam.py index 4ebd8accd..52ec8e32f 100644 --- a/python/gtsam/utils/visual_isam.py +++ b/python/gtsam/utils/visual_isam.py @@ -79,7 +79,7 @@ def initialize(data, truth, options): return isam, result, nextPoseIndex -def step(data, isam, result, truth, currPoseIndex): +def step(data, isam, result, truth, currPoseIndex, isamArgs=()): ''' Do one step isam update @param[in] data: measurement data (odometry and visual measurements and their noiseModels) @@ -123,7 +123,7 @@ def step(data, isam, result, truth, currPoseIndex): # Update ISAM # figure(1)tic - isam.update(newFactors, initialEstimates) + isam.update(newFactors, initialEstimates, *isamArgs) # t=toc plot(frame_i,t,'r.') tic newResult = isam.calculateEstimate() # t=toc plot(frame_i,t,'g.') From 42e8f498e7adaac369e5f9ad21305f37d123094d Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 4 Feb 2020 17:57:44 -0500 Subject: [PATCH 76/78] added tests for newly wrapped isam2 methods --- python/gtsam/tests/test_VisualISAMExample.py | 95 ++++++++++++-------- 1 file changed, 56 insertions(+), 39 deletions(-) diff --git a/python/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py index a25019213..5f30e83ea 100644 --- a/python/gtsam/tests/test_VisualISAMExample.py +++ b/python/gtsam/tests/test_VisualISAMExample.py @@ -21,12 +21,13 @@ from gtsam import symbol class TestVisualISAMExample(GtsamTestCase): """Test class for ISAM2 with visual landmarks.""" - def test_VisualISAMExample(self): - """Test to see if ISAM works as expected for a simple visual SLAM example.""" + + def setUp(self): # Data Options options = generator.Options() options.triangle = False options.nrCameras = 20 + self.options = options # iSAM Options isamOptions = visual_isam.Options() @@ -35,71 +36,87 @@ class TestVisualISAMExample(GtsamTestCase): isamOptions.batchInitialization = True isamOptions.reorderInterval = 10 isamOptions.alwaysRelinearize = False + self.isamOptions = isamOptions # Generate data - data, truth = generator.generate_data(options) + self.data, self.truth = generator.generate_data(options) + def test_VisualISAMExample(self): + """Test to see if ISAM works as expected for a simple visual SLAM example.""" # Initialize iSAM with the first pose and points isam, result, nextPose = visual_isam.initialize( - data, truth, isamOptions) + self.data, self.truth, self.isamOptions) # Main loop for iSAM: stepping through all poses - for currentPose in range(nextPose, options.nrCameras): - isam, result = visual_isam.step(data, isam, result, truth, - currentPose) + for currentPose in range(nextPose, self.options.nrCameras): + isam, result = visual_isam.step(self.data, isam, result, + self.truth, currentPose) - for i, _ in enumerate(truth.cameras): + for i, true_camera in enumerate(self.truth.cameras): pose_i = result.atPose3(symbol('x', i)) - self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) + self.gtsamAssertEquals(pose_i, true_camera.pose(), 1e-5) - for j, _ in enumerate(truth.points): + for j, expected_point in enumerate(self.truth.points): point_j = result.atPoint3(symbol('l', j)) - self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) + self.gtsamAssertEquals(point_j, expected_point, 1e-5) + + @unittest.skip( + "Need to understand how to calculate error using VectorValues correctly" + ) + def test_isam2_error(self): + #TODO(Varun) fix + # Initialize iSAM with the first pose and points + isam, result, nextPose = visual_isam.initialize( + self.data, self.truth, self.isamOptions) + + # Main loop for 
From 42e8f498e7adaac369e5f9ad21305f37d123094d Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 4 Feb 2020 17:57:44 -0500
Subject: [PATCH 76/78] added tests for newly wrapped isam2 methods

---
 python/gtsam/tests/test_VisualISAMExample.py | 95 ++++++++++++--------
 1 file changed, 56 insertions(+), 39 deletions(-)

diff --git a/python/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py
index a25019213..5f30e83ea 100644
--- a/python/gtsam/tests/test_VisualISAMExample.py
+++ b/python/gtsam/tests/test_VisualISAMExample.py
@@ -21,12 +21,13 @@ from gtsam import symbol
 
 class TestVisualISAMExample(GtsamTestCase):
     """Test class for ISAM2 with visual landmarks."""
-    def test_VisualISAMExample(self):
-        """Test to see if ISAM works as expected for a simple visual SLAM example."""
+
+    def setUp(self):
         # Data Options
         options = generator.Options()
         options.triangle = False
         options.nrCameras = 20
+        self.options = options
 
         # iSAM Options
         isamOptions = visual_isam.Options()
@@ -35,71 +36,87 @@ class TestVisualISAMExample(GtsamTestCase):
         isamOptions.batchInitialization = True
         isamOptions.reorderInterval = 10
         isamOptions.alwaysRelinearize = False
+        self.isamOptions = isamOptions
 
         # Generate data
-        data, truth = generator.generate_data(options)
+        self.data, self.truth = generator.generate_data(options)
 
+    def test_VisualISAMExample(self):
+        """Test to see if ISAM works as expected for a simple visual SLAM example."""
         # Initialize iSAM with the first pose and points
         isam, result, nextPose = visual_isam.initialize(
-            data, truth, isamOptions)
+            self.data, self.truth, self.isamOptions)
 
         # Main loop for iSAM: stepping through all poses
-        for currentPose in range(nextPose, options.nrCameras):
-            isam, result = visual_isam.step(data, isam, result, truth,
-                                            currentPose)
+        for currentPose in range(nextPose, self.options.nrCameras):
+            isam, result = visual_isam.step(self.data, isam, result,
+                                            self.truth, currentPose)
 
-        for i, _ in enumerate(truth.cameras):
+        for i, true_camera in enumerate(self.truth.cameras):
             pose_i = result.atPose3(symbol('x', i))
-            self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
+            self.gtsamAssertEquals(pose_i, true_camera.pose(), 1e-5)
 
-        for j, _ in enumerate(truth.points):
+        for j, expected_point in enumerate(self.truth.points):
             point_j = result.atPoint3(symbol('l', j))
-            self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
+            self.gtsamAssertEquals(point_j, expected_point, 1e-5)
+
+    @unittest.skip(
+        "Need to understand how to calculate error using VectorValues correctly"
+    )
+    def test_isam2_error(self):
+        #TODO(Varun) fix
+        # Initialize iSAM with the first pose and points
+        isam, result, nextPose = visual_isam.initialize(
+            self.data, self.truth, self.isamOptions)
+
+        # Main loop for iSAM: stepping through all poses
+        for currentPose in range(nextPose, self.options.nrCameras):
+            isam, result = visual_isam.step(self.data, isam, result,
+                                            self.truth, currentPose)
+
+        values = gtsam.VectorValues()
+
+        estimate = isam.calculateBestEstimate()
+
+        keys = estimate.keys()
+
+        for k in range(keys.size()):
+            key = keys.at(k)
+            try:
+                v = estimate.atPose3(key).matrix()
+
+            except RuntimeError:
+                v = estimate.atPoint3(key).vector()
+            values.insert(key, v)
+        # print(isam.error(values))
 
     def test_isam2_update(self):
         """
         Test for full version of ISAM2::update method
         """
-        # Data Options
-        options = generator.Options()
-        options.triangle = False
-        options.nrCameras = 20
-
-        # iSAM Options
-        isamOptions = visual_isam.Options()
-        isamOptions.hardConstraint = False
-        isamOptions.pointPriors = False
-        isamOptions.batchInitialization = True
-        isamOptions.reorderInterval = 10
-        isamOptions.alwaysRelinearize = False
-
-        # Generate data
-        data, truth = generator.generate_data(options)
-
         # Initialize iSAM with the first pose and points
-        isam, result, nextPose = visual_isam.initialize(data, truth, isamOptions)
+        isam, result, nextPose = visual_isam.initialize(
+            self.data, self.truth, self.isamOptions)
 
         remove_factor_indices = gtsam.FactorIndices()
         constrained_keys = gtsam.KeyGroupMap()
         no_relin_keys = gtsam.KeyList()
         extra_reelim_keys = gtsam.KeyList()
-        isamArgs = (remove_factor_indices,
-                    constrained_keys,
-                    no_relin_keys,
-                    extra_reelim_keys,
-                    False)
+        isamArgs = (remove_factor_indices, constrained_keys, no_relin_keys,
+                    extra_reelim_keys, False)
 
         # Main loop for iSAM: stepping through all poses
-        for currentPose in range(nextPose, options.nrCameras):
-            isam, result = visual_isam.step(data, isam, result, truth, currentPose, isamArgs)
+        for currentPose in range(nextPose, self.options.nrCameras):
+            isam, result = visual_isam.step(self.data, isam, result,
+                                            self.truth, currentPose, isamArgs)
 
-        for i in range(len(truth.cameras)):
+        for i in range(len(self.truth.cameras)):
             pose_i = result.atPose3(symbol(ord('x'), i))
-            self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
+            self.gtsamAssertEquals(pose_i, self.truth.cameras[i].pose(), 1e-5)
 
-        for j in range(len(truth.points)):
+        for j in range(len(self.truth.points)):
             point_j = result.atPoint3(symbol(ord('l'), j))
-            self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
+            self.gtsamAssertEquals(point_j, self.truth.points[j], 1e-5)
 
 
 if __name__ == "__main__":

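The skipped test above stalls on one question: how to turn the Values returned by calculateBestEstimate() into the VectorValues that error() expects. The next patch settles on tangent-space vectors, Pose3 via Logmap and Point3 as a plain 3-vector; a self-contained sketch of that conversion, assuming every variable in the estimate is one of those two types:

    import gtsam

    def estimate_to_vector_values(estimate: gtsam.Values) -> gtsam.VectorValues:
        """Map each Pose3 to its 6-vector via Logmap; keep each Point3 as a 3-vector."""
        values = gtsam.VectorValues()
        for key in estimate.keys():
            try:
                v = gtsam.Pose3.Logmap(estimate.atPose3(key))
            except RuntimeError:
                v = estimate.atPoint3(key)
            values.insert(key, v)
        return values
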
From 93ed850c6c01b7c5f66e6db291f00769b46ad95b Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Wed, 5 Jul 2023 13:14:16 -0400
Subject: [PATCH 77/78] get tests working

---
 gtsam/nonlinear/nonlinear.i                  | 10 ++-----
 python/gtsam/tests/test_VisualISAMExample.py | 29 ++++++++++----------
 2 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i
index a24bcbb75..3721e27e8 100644
--- a/gtsam/nonlinear/nonlinear.i
+++ b/gtsam/nonlinear/nonlinear.i
@@ -512,16 +512,10 @@ class ISAM2 {
   gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors,
                             const gtsam::Values& newTheta,
                             const gtsam::FactorIndices& removeFactorIndices,
-                            const gtsam::KeyGroupMap& constrainedKeys,
-                            const gtsam::KeyList& noRelinKeys,
-                            const gtsam::KeyList& extraReelimKeys);
-  gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors,
-                            const gtsam::Values& newTheta,
-                            const gtsam::FactorIndices& removeFactorIndices,
-                            const gtsam::KeyGroupMap& constrainedKeys,
+                            gtsam::KeyGroupMap& constrainedKeys,
                             const gtsam::KeyList& noRelinKeys,
                             const gtsam::KeyList& extraReelimKeys,
-                            bool force_relinearize);
+                            bool force_relinearize=false);
 
   gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors,
                             const gtsam::Values& newTheta,
diff --git a/python/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py
index 5f30e83ea..c7e72ff8b 100644
--- a/python/gtsam/tests/test_VisualISAMExample.py
+++ b/python/gtsam/tests/test_VisualISAMExample.py
@@ -16,6 +16,7 @@ import gtsam.utils.visual_data_generator as generator
 import gtsam.utils.visual_isam as visual_isam
 from gtsam.utils.test_case import GtsamTestCase
 
+import gtsam
 from gtsam import symbol
 
 
@@ -60,10 +61,8 @@ class TestVisualISAMExample(GtsamTestCase):
             point_j = result.atPoint3(symbol('l', j))
             self.gtsamAssertEquals(point_j, expected_point, 1e-5)
 
-    @unittest.skip(
-        "Need to understand how to calculate error using VectorValues correctly"
-    )
     def test_isam2_error(self):
+        """Test for isam2 error() method."""
         #TODO(Varun) fix
         # Initialize iSAM with the first pose and points
         isam, result, nextPose = visual_isam.initialize(
             self.data, self.truth, self.isamOptions)
@@ -78,17 +77,19 @@ class TestVisualISAMExample(GtsamTestCase):
 
         estimate = isam.calculateBestEstimate()
 
-        keys = estimate.keys()
-
-        for k in range(keys.size()):
-            key = keys.at(k)
+        for key in estimate.keys():
             try:
-                v = estimate.atPose3(key).matrix()
-
+                v = gtsam.Pose3.Logmap(estimate.atPose3(key))
             except RuntimeError:
-                v = estimate.atPoint3(key).vector()
+                v = estimate.atPoint3(key)
+
+            print(key)
+            print(type(v))
+            print(v)
             values.insert(key, v)
-        # print(isam.error(values))
+        print(isam.error(values))
+
+        self.assertEqual(isam.error(values), 34212421.14731998)
 
     def test_isam2_update(self):
         """
@@ -98,7 +99,7 @@ class TestVisualISAMExample(GtsamTestCase):
         isam, result, nextPose = visual_isam.initialize(
             self.data, self.truth, self.isamOptions)
 
-        remove_factor_indices = gtsam.FactorIndices()
+        remove_factor_indices = []
         constrained_keys = gtsam.KeyGroupMap()
         no_relin_keys = gtsam.KeyList()
         extra_reelim_keys = gtsam.KeyList()
         isamArgs = (remove_factor_indices, constrained_keys, no_relin_keys,
                     extra_reelim_keys, False)
 
         # Main loop for iSAM: stepping through all poses
         for currentPose in range(nextPose, self.options.nrCameras):
             isam, result = visual_isam.step(self.data, isam, result,
                                             self.truth, currentPose, isamArgs)
 
         for i in range(len(self.truth.cameras)):
-            pose_i = result.atPose3(symbol(ord('x'), i))
+            pose_i = result.atPose3(symbol('x', i))
             self.gtsamAssertEquals(pose_i, self.truth.cameras[i].pose(), 1e-5)
 
         for j in range(len(self.truth.points)):
-            point_j = result.atPoint3(symbol(ord('l'), j))
+            point_j = result.atPoint3(symbol('l', j))
             self.gtsamAssertEquals(point_j, self.truth.points[j], 1e-5)

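With force_relinearize given a default in the wrapper declaration, the trailing boolean becomes optional on the Python side, assuming the wrapper generator carries the default through to the binding as it usually does. A two-line sketch, reusing newFactors and initialEstimates from visual_isam.step above; under that assumption the two calls are equivalent:

    isam.update(newFactors, initialEstimates, [], gtsam.KeyGroupMap(),
                gtsam.KeyList(), gtsam.KeyList(), False)
    isam.update(newFactors, initialEstimates, [], gtsam.KeyGroupMap(),
                gtsam.KeyList(), gtsam.KeyList())
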
From a14fb8db7d55da85e3d5f860572cae7bab7ee20f Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Thu, 6 Jul 2023 04:12:39 -0400
Subject: [PATCH 78/78] formatting and fix

---
 gtsam/nonlinear/nonlinear.i                  | 7 +++----
 python/gtsam/tests/test_VisualISAMExample.py | 7 +------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i
index 3721e27e8..4d0d847c1 100644
--- a/gtsam/nonlinear/nonlinear.i
+++ b/gtsam/nonlinear/nonlinear.i
@@ -515,7 +515,7 @@ class ISAM2 {
                             gtsam::KeyGroupMap& constrainedKeys,
                             const gtsam::KeyList& noRelinKeys,
                             const gtsam::KeyList& extraReelimKeys,
-                            bool force_relinearize=false);
+                            bool force_relinearize = false);
 
   gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors,
                             const gtsam::Values& newTheta,
@@ -548,9 +548,8 @@ class ISAM2 {
 
   string dot(const gtsam::KeyFormatter& keyFormatter =
                  gtsam::DefaultKeyFormatter) const;
-  void saveGraph(string s,
-                 const gtsam::KeyFormatter& keyFormatter =
-                     gtsam::DefaultKeyFormatter) const;
+  void saveGraph(string s, const gtsam::KeyFormatter& keyFormatter =
+                               gtsam::DefaultKeyFormatter) const;
 };
 
 #include
diff --git a/python/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py
index c7e72ff8b..39f563a35 100644
--- a/python/gtsam/tests/test_VisualISAMExample.py
+++ b/python/gtsam/tests/test_VisualISAMExample.py
@@ -63,7 +63,6 @@ class TestVisualISAMExample(GtsamTestCase):
 
     def test_isam2_error(self):
         """Test for isam2 error() method."""
-        #TODO(Varun) fix
         # Initialize iSAM with the first pose and points
         isam, result, nextPose = visual_isam.initialize(
             self.data, self.truth, self.isamOptions)
@@ -83,13 +82,9 @@ class TestVisualISAMExample(GtsamTestCase):
             except RuntimeError:
                 v = estimate.atPoint3(key)
 
-            print(key)
-            print(type(v))
-            print(v)
             values.insert(key, v)
-        print(isam.error(values))
 
-        self.assertEqual(isam.error(values), 34212421.14731998)
+        self.assertAlmostEqual(isam.error(values), 34212421.14731998)
 
     def test_isam2_update(self):
         """