diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 3fc2d662f..f42939bc2 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -112,6 +112,11 @@ jobs: run: | echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV echo "GTSAM Uses TBB" + - name: Set Swap Space + if: runner.os == 'Linux' + uses: pierotofy/set-swap-space@master + with: + swap-size-gb: 6 - name: Build (Linux) if: runner.os == 'Linux' run: | diff --git a/.gitignore b/.gitignore index 0e34eed34..e3f7613fe 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ CMakeLists.txt.user* xcode/ /Dockerfile +/python/gtsam/notebooks/.ipynb_checkpoints/ellipses-checkpoint.ipynb diff --git a/CMakeLists.txt b/CMakeLists.txt index cfb251663..74433f333 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,7 +10,7 @@ endif() set (GTSAM_VERSION_MAJOR 4) set (GTSAM_VERSION_MINOR 2) set (GTSAM_VERSION_PATCH 0) -set (GTSAM_PRERELEASE_VERSION "a6") +set (GTSAM_PRERELEASE_VERSION "a7") math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}") if (${GTSAM_VERSION_PATCH} EQUAL 0) diff --git a/cmake/HandlePython.cmake b/cmake/HandlePython.cmake index 0c24824bc..16215986b 100644 --- a/cmake/HandlePython.cmake +++ b/cmake/HandlePython.cmake @@ -16,6 +16,7 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX) set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR}) set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR}) + set(Python_VERSION_PATCH ${PYTHON_VERSION_PATCH}) set(Python_EXECUTABLE ${PYTHON_EXECUTABLE}) else() @@ -31,11 +32,12 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX) set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR}) set(Python_VERSION_MINOR ${Python3_VERSION_MINOR}) + set(Python_VERSION_PATCH ${Python3_VERSION_PATCH}) endif() set(GTSAM_PYTHON_VERSION - "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}" + "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}" CACHE 
STRING "The version of Python to build the wrappers against." FORCE) diff --git a/examples/TriangulationLOSTExample.cpp b/examples/TriangulationLOSTExample.cpp new file mode 100644 index 000000000..a862026e0 --- /dev/null +++ b/examples/TriangulationLOSTExample.cpp @@ -0,0 +1,159 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file TriangulationLOSTExample.cpp + * @author Akshay Krishnan + * @brief This example runs triangulation several times using 3 different + * approaches: LOST, DLT, and DLT with optimization. It reports the covariance + * and the runtime for each approach. + * + * @date 2022-07-10 + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace std; +using namespace gtsam; + +static std::mt19937 rng(42); + +void PrintCovarianceStats(const Matrix& mat, const std::string& method) { + Matrix centered = mat.rowwise() - mat.colwise().mean(); + Matrix cov = (centered.adjoint() * centered) / double(mat.rows() - 1); + std::cout << method << " covariance: " << std::endl; + std::cout << cov << std::endl; + std::cout << "Trace sqrt: " << sqrt(cov.trace()) << std::endl << std::endl; +} + +void PrintDuration(const std::chrono::nanoseconds dur, double num_samples, + const std::string& method) { + double nanoseconds = dur.count() / num_samples; + std::cout << "Time taken by " << method << ": " << nanoseconds * 1e-3 + << std::endl; +} + +void GetLargeCamerasDataset(CameraSet>* cameras, + std::vector* poses, Point3* point, + Point2Vector* measurements) { + const double minXY = -10, maxXY = 10; + const double minZ = -20, maxZ = 0; + 
const int nrCameras = 500; + cameras->reserve(nrCameras); + poses->reserve(nrCameras); + measurements->reserve(nrCameras); + *point = Point3(0.0, 0.0, 10.0); + + std::uniform_real_distribution rand_xy(minXY, maxXY); + std::uniform_real_distribution rand_z(minZ, maxZ); + Cal3_S2 identityK; + + for (int i = 0; i < nrCameras; ++i) { + Point3 wti(rand_xy(rng), rand_xy(rng), rand_z(rng)); + Pose3 wTi(Rot3(), wti); + + poses->push_back(wTi); + cameras->emplace_back(wTi, identityK); + measurements->push_back(cameras->back().project(*point)); + } +} + +void GetSmallCamerasDataset(CameraSet>* cameras, + std::vector* poses, Point3* point, + Point2Vector* measurements) { + Pose3 pose1; + Pose3 pose2(Rot3(), Point3(5., 0., -5.)); + Cal3_S2 identityK; + PinholeCamera camera1(pose1, identityK); + PinholeCamera camera2(pose2, identityK); + + *point = Point3(0, 0, 1); + cameras->push_back(camera1); + cameras->push_back(camera2); + *poses = {pose1, pose2}; + *measurements = {camera1.project(*point), camera2.project(*point)}; +} + +Point2Vector AddNoiseToMeasurements(const Point2Vector& measurements, + const double measurementSigma) { + std::normal_distribution normal(0.0, measurementSigma); + + Point2Vector noisyMeasurements; + noisyMeasurements.reserve(measurements.size()); + for (const auto& p : measurements) { + noisyMeasurements.emplace_back(p.x() + normal(rng), p.y() + normal(rng)); + } + return noisyMeasurements; +} + +/* ************************************************************************* */ +int main(int argc, char* argv[]) { + CameraSet> cameras; + std::vector poses; + Point3 landmark; + Point2Vector measurements; + GetLargeCamerasDataset(&cameras, &poses, &landmark, &measurements); + // GetSmallCamerasDataset(&cameras, &poses, &landmark, &measurements); + const double measurementSigma = 1e-2; + SharedNoiseModel measurementNoise = + noiseModel::Isotropic::Sigma(2, measurementSigma); + + const long int nrTrials = 1000; + Matrix errorsDLT = Matrix::Zero(nrTrials, 3); + 
Matrix errorsLOST = Matrix::Zero(nrTrials, 3); + Matrix errorsDLTOpt = Matrix::Zero(nrTrials, 3); + + double rank_tol = 1e-9; + boost::shared_ptr calib = boost::make_shared(); + std::chrono::nanoseconds durationDLT{0}; + std::chrono::nanoseconds durationDLTOpt{0}; + std::chrono::nanoseconds durationLOST{0}; + + for (int i = 0; i < nrTrials; i++) { + Point2Vector noisyMeasurements = + AddNoiseToMeasurements(measurements, measurementSigma); + + auto lostStart = std::chrono::high_resolution_clock::now(); + boost::optional estimateLOST = triangulatePoint3( + cameras, noisyMeasurements, rank_tol, false, measurementNoise, true); + durationLOST += std::chrono::high_resolution_clock::now() - lostStart; + + auto dltStart = std::chrono::high_resolution_clock::now(); + boost::optional estimateDLT = triangulatePoint3( + cameras, noisyMeasurements, rank_tol, false, measurementNoise, false); + durationDLT += std::chrono::high_resolution_clock::now() - dltStart; + + auto dltOptStart = std::chrono::high_resolution_clock::now(); + boost::optional estimateDLTOpt = triangulatePoint3( + cameras, noisyMeasurements, rank_tol, true, measurementNoise, false); + durationDLTOpt += std::chrono::high_resolution_clock::now() - dltOptStart; + + errorsLOST.row(i) = *estimateLOST - landmark; + errorsDLT.row(i) = *estimateDLT - landmark; + errorsDLTOpt.row(i) = *estimateDLTOpt - landmark; + } + PrintCovarianceStats(errorsLOST, "LOST"); + PrintCovarianceStats(errorsDLT, "DLT"); + PrintCovarianceStats(errorsDLTOpt, "DLT_OPT"); + + PrintDuration(durationLOST, nrTrials, "LOST"); + PrintDuration(durationDLT, nrTrials, "DLT"); + PrintDuration(durationDLTOpt, nrTrials, "DLT_OPT"); +} diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index a293c6ec2..09f1ea806 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -10,6 +10,7 @@ set (gtsam_subdirs inference symbolic discrete + hybrid linear nonlinear sam diff --git a/gtsam/base/ConcurrentMap.h b/gtsam/base/ConcurrentMap.h index 2d7cbd6db..cf4537d4c 
100644 --- a/gtsam/base/ConcurrentMap.h +++ b/gtsam/base/ConcurrentMap.h @@ -113,6 +113,7 @@ private: template void load(Archive& ar, const unsigned int /*version*/) { + this->clear(); // Load into STL container and then fill our map FastVector > map; ar & BOOST_SERIALIZATION_NVP(map); diff --git a/gtsam/base/tests/testSerializationBase.cpp b/gtsam/base/tests/testSerializationBase.cpp index f7aa97b31..d2f4d5f51 100644 --- a/gtsam/base/tests/testSerializationBase.cpp +++ b/gtsam/base/tests/testSerializationBase.cpp @@ -18,6 +18,7 @@ #include +#include #include #include #include @@ -106,6 +107,39 @@ TEST (Serialization, matrix_vector) { EXPECT(equalityBinary((Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished())); } +/* ************************************************************************* */ +TEST (Serialization, ConcurrentMap) { + + ConcurrentMap map; + + map.insert(make_pair(1, "apple")); + map.insert(make_pair(2, "banana")); + + std::string binaryPath = "saved_map.dat"; + try { + std::ofstream outputStream(binaryPath); + boost::archive::binary_oarchive outputArchive(outputStream); + outputArchive << map; + } catch(...) { + EXPECT(false); + } + + // Verify that the existing map contents are replaced by the archive data + ConcurrentMap mapFromDisk; + mapFromDisk.insert(make_pair(3, "clam")); + EXPECT(mapFromDisk.exists(3)); + try { + std::ifstream ifs(binaryPath); + boost::archive::binary_iarchive inputArchive(ifs); + inputArchive >> mapFromDisk; + } catch(...) 
{ + EXPECT(false); + } + EXPECT(mapFromDisk.exists(1)); + EXPECT(mapFromDisk.exists(2)); + EXPECT(!mapFromDisk.exists(3)); +} + /* ************************************************************************* */ diff --git a/gtsam/basis/Chebyshev2.h b/gtsam/basis/Chebyshev2.h index e306c93d5..4c3542d56 100644 --- a/gtsam/basis/Chebyshev2.h +++ b/gtsam/basis/Chebyshev2.h @@ -36,8 +36,6 @@ #include #include -#include - namespace gtsam { /** @@ -134,7 +132,7 @@ class GTSAM_EXPORT Chebyshev2 : public Basis { * Create matrix of values at Chebyshev points given vector-valued function. */ template - static Matrix matrix(boost::function(double)> f, + static Matrix matrix(std::function(double)> f, size_t N, double a = -1, double b = 1) { Matrix Xmat(M, N); for (size_t j = 0; j < N; j++) { diff --git a/gtsam/basis/tests/testChebyshev2.cpp b/gtsam/basis/tests/testChebyshev2.cpp index 9090757f4..73339e0f1 100644 --- a/gtsam/basis/tests/testChebyshev2.cpp +++ b/gtsam/basis/tests/testChebyshev2.cpp @@ -118,7 +118,7 @@ TEST(Chebyshev2, InterpolateVector) { EXPECT(assert_equal(expected, fx(X, actualH), 1e-9)); // Check derivative - boost::function)> f = boost::bind( + std::function)> f = boost::bind( &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, boost::none); Matrix numericalH = numericalDerivative11, 2 * N>(f, X); diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 99f29b8e5..f72743206 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -39,10 +39,10 @@ #include #include -using boost::assign::operator+=; - namespace gtsam { + using boost::assign::operator+=; + /****************************************************************************/ // Node /****************************************************************************/ @@ -291,10 +291,7 @@ namespace gtsam { } os << "\"" << this->id() << "\" -> \"" << branch->id() << "\""; - if (B == 2) { - if (i == 0) os << " [style=dashed]"; - if (i > 1) os 
<< " [style=bold]"; - } + if (B == 2 && i == 0) os << " [style=dashed]"; os << std::endl; branch->dot(os, labelFormatter, valueFormatter, showZero); } diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 1f45d320b..dede2a2f0 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -22,7 +22,7 @@ #include #include -#include +#include #include #include #include @@ -220,7 +220,7 @@ namespace gtsam { /// @{ /// Make virtual - virtual ~DecisionTree() {} + virtual ~DecisionTree() = default; /// Check if tree is empty. bool empty() const { return !root_; } diff --git a/gtsam/discrete/DiscreteFactorGraph.h b/gtsam/discrete/DiscreteFactorGraph.h index f962b1802..bfa57e7db 100644 --- a/gtsam/discrete/DiscreteFactorGraph.h +++ b/gtsam/discrete/DiscreteFactorGraph.h @@ -209,6 +209,10 @@ class GTSAM_EXPORT DiscreteFactorGraph /// @} }; // \ DiscreteFactorGraph +std::pair // +EliminateForMPE(const DiscreteFactorGraph& factors, + const Ordering& frontalKeys); + /// traits template <> struct traits : public Testable {}; diff --git a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp index 0a7d869ec..3d9621aff 100644 --- a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp +++ b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp @@ -415,16 +415,16 @@ TEST(DiscreteFactorGraph, DotWithNames) { "graph {\n" " size=\"5,5\";\n" "\n" - " varC[label=\"C\"];\n" - " varA[label=\"A\"];\n" - " varB[label=\"B\"];\n" + " var0[label=\"C\"];\n" + " var1[label=\"A\"];\n" + " var2[label=\"B\"];\n" "\n" " factor0[label=\"\", shape=point];\n" - " varC--factor0;\n" - " varA--factor0;\n" + " var0--factor0;\n" + " var1--factor0;\n" " factor1[label=\"\", shape=point];\n" - " varC--factor1;\n" - " varB--factor1;\n" + " var0--factor1;\n" + " var2--factor1;\n" "}\n"; EXPECT(actual == expected); } diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 835c53d7c..13a435c24 100644 --- 
a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,7 @@ namespace gtsam { /// As of GTSAM 4, in order to make GTSAM more lean, /// it is now possible to just typedef Point3 to Vector3 typedef Vector3 Point3; +typedef std::vector > Point3Vector; // Convenience typedef using Point3Pair = std::pair; diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index 2da51a625..2938e90f5 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -489,6 +489,11 @@ boost::optional align(const Point3Pairs &baPointPairs) { } #endif +/* ************************************************************************* */ +Pose3 Pose3::slerp(double t, const Pose3& other, OptionalJacobian<6, 6> Hx, OptionalJacobian<6, 6> Hy) const { + return interpolate(*this, other, t, Hx, Hy); +} + /* ************************************************************************* */ std::ostream &operator<<(std::ostream &os, const Pose3& pose) { // Both Rot3 and Point3 have ostream definitions so we use them. 
diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index c36047349..33fb55250 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -379,6 +379,14 @@ public: return std::make_pair(0, 2); } + /** + * @brief Spherical Linear interpolation between *this and other + * @param t a value between 0 and 1 + * @param other final point of interpolation geodesic on manifold + */ + Pose3 slerp(double t, const Pose3& other, OptionalJacobian<6, 6> Hx = boost::none, + OptionalJacobian<6, 6> Hy = boost::none) const; + /// Output stream operator GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Pose3& p); diff --git a/gtsam/geometry/Rot3.cpp b/gtsam/geometry/Rot3.cpp index 8f8088a74..6db5e1919 100644 --- a/gtsam/geometry/Rot3.cpp +++ b/gtsam/geometry/Rot3.cpp @@ -228,6 +228,7 @@ double Rot3::yaw(OptionalJacobian<1, 3> H) const { } /* ************************************************************************* */ +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 Vector Rot3::quaternion() const { gtsam::Quaternion q = toQuaternion(); Vector v(4); @@ -237,6 +238,7 @@ Vector Rot3::quaternion() const { v(3) = q.z(); return v; } +#endif /* ************************************************************************* */ pair Rot3::axisAngle() const { @@ -292,8 +294,8 @@ pair RQ(const Matrix3& A, OptionalJacobian<3, 9> H) { (*H)(1, 8) = yHb22 * cx; // Next, calculate the derivate of z. 
We have - // c20 = a10 * cy + a11 * sx * sy + a12 * cx * sy - // c22 = a11 * cx - a12 * sx + // c10 = a10 * cy + a11 * sx * sy + a12 * cx * sy + // c11 = a11 * cx - a12 * sx const auto c10Hx = (A(1, 1) * cx - A(1, 2) * sx) * sy; const auto c10Hy = A(1, 2) * cx * cy + A(1, 1) * cy * sx - A(1, 0) * sy; Vector9 c10HA = c10Hx * H->row(0) + c10Hy * H->row(1); diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 18bd88b52..01ca7778c 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -515,11 +515,16 @@ class GTSAM_EXPORT Rot3 : public LieGroup { */ gtsam::Quaternion toQuaternion() const; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 /** * Converts to a generic matrix to allow for use with matlab * In format: w x y z + * @deprecated: use Rot3::toQuaternion() instead. + * If still using this API, note that in the returned Vector `V`, + * `V.x()` denotes the actual `qw`, `V.y()` denotes `qx`, `V.z()` denotes `qy`, and `V.w()` denotes `qz`. */ - Vector quaternion() const; + Vector GTSAM_DEPRECATED quaternion() const; +#endif /** * @brief Spherical Linear intERPolation between *this and other diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 415aa0dc4..4ebff6c03 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -355,7 +355,7 @@ class Rot3 { double yaw() const; pair axisAngle() const; gtsam::Quaternion toQuaternion() const; - Vector quaternion() const; + // Vector quaternion() const; // @deprecated, see https://github.com/borglab/gtsam/pull/1219 gtsam::Rot3 slerp(double t, const gtsam::Rot3& other) const; // enabling serialization functionality @@ -446,24 +446,43 @@ class Pose3 { // Group static gtsam::Pose3 identity(); gtsam::Pose3 inverse() const; + gtsam::Pose3 inverse(Eigen::Ref H) const; gtsam::Pose3 compose(const gtsam::Pose3& pose) const; + gtsam::Pose3 compose(const gtsam::Pose3& pose, + Eigen::Ref H1, + Eigen::Ref H2) const; gtsam::Pose3 between(const gtsam::Pose3& pose) const; + gtsam::Pose3 
between(const gtsam::Pose3& pose, + Eigen::Ref H1, + Eigen::Ref H2) const; + gtsam::Pose3 slerp(double t, const gtsam::Pose3& pose) const; + gtsam::Pose3 slerp(double t, const gtsam::Pose3& pose, + Eigen::Ref Hx, + Eigen::Ref Hy) const; // Operator Overloads gtsam::Pose3 operator*(const gtsam::Pose3& pose) const; // Manifold gtsam::Pose3 retract(Vector v) const; + gtsam::Pose3 retract(Vector v, Eigen::Ref Hxi) const; Vector localCoordinates(const gtsam::Pose3& pose) const; + Vector localCoordinates(const gtsam::Pose3& pose, Eigen::Ref Hxi) const; // Lie Group static gtsam::Pose3 Expmap(Vector v); + static gtsam::Pose3 Expmap(Vector v, Eigen::Ref Hxi); static Vector Logmap(const gtsam::Pose3& pose); + static Vector Logmap(const gtsam::Pose3& pose, Eigen::Ref Hpose); gtsam::Pose3 expmap(Vector v); Vector logmap(const gtsam::Pose3& pose); Matrix AdjointMap() const; Vector Adjoint(Vector xi) const; + Vector Adjoint(Vector xi, Eigen::Ref H_this, + Eigen::Ref H_xib) const; Vector AdjointTranspose(Vector xi) const; + Vector AdjointTranspose(Vector xi, Eigen::Ref H_this, + Eigen::Ref H_x) const; static Matrix adjointMap(Vector xi); static Vector adjoint(Vector xi, Vector y); static Matrix adjointMap_(Vector xi); @@ -476,7 +495,11 @@ class Pose3 { // Group Action on Point3 gtsam::Point3 transformFrom(const gtsam::Point3& point) const; + gtsam::Point3 transformFrom(const gtsam::Point3& point, Eigen::Ref Hself, + Eigen::Ref Hpoint) const; gtsam::Point3 transformTo(const gtsam::Point3& point) const; + gtsam::Point3 transformTo(const gtsam::Point3& point, Eigen::Ref Hself, + Eigen::Ref Hpoint) const; // Matrix versions Matrix transformFrom(const Matrix& points) const; @@ -484,15 +507,25 @@ class Pose3 { // Standard Interface gtsam::Rot3 rotation() const; + gtsam::Rot3 rotation(Eigen::Ref Hself) const; gtsam::Point3 translation() const; + gtsam::Point3 translation(Eigen::Ref Hself) const; double x() const; double y() const; double z() const; Matrix matrix() const; gtsam::Pose3 
transformPoseFrom(const gtsam::Pose3& pose) const; + gtsam::Pose3 transformPoseFrom(const gtsam::Pose3& pose, Eigen::Ref Hself, + Eigen::Ref HaTb) const; gtsam::Pose3 transformPoseTo(const gtsam::Pose3& pose) const; + gtsam::Pose3 transformPoseTo(const gtsam::Pose3& pose, Eigen::Ref Hself, + Eigen::Ref HwTb) const; double range(const gtsam::Point3& point); + double range(const gtsam::Point3& point, Eigen::Ref Hself, + Eigen::Ref Hpoint); double range(const gtsam::Pose3& pose); + double range(const gtsam::Pose3& pose, Eigen::Ref Hself, + Eigen::Ref Hpose); // enabling serialization functionality void serialize() const; @@ -547,6 +580,12 @@ class EssentialMatrix { // Standard Constructors EssentialMatrix(const gtsam::Rot3& aRb, const gtsam::Unit3& aTb); + // Constructors from Pose3 + gtsam::EssentialMatrix FromPose3(const gtsam::Pose3& _1P2_); + + gtsam::EssentialMatrix FromPose3(const gtsam::Pose3& _1P2_, + Eigen::Ref H); + // Testable void print(string s = "") const; bool equals(const gtsam::EssentialMatrix& pose, double tol) const; @@ -867,7 +906,7 @@ template class PinholeCamera { // Standard Constructors and Named Constructors PinholeCamera(); - PinholeCamera(const gtsam::PinholeCamera other); + PinholeCamera(const This other); PinholeCamera(const gtsam::Pose3& pose); PinholeCamera(const gtsam::Pose3& pose, const CALIBRATION& K); static This Level(const CALIBRATION& K, const gtsam::Pose2& pose, @@ -904,6 +943,12 @@ class PinholeCamera { Eigen::Ref Dresult_dp, Eigen::Ref Dresult_ddepth, Eigen::Ref Dresult_dcal); + + gtsam::Point2 reprojectionError(const gtsam::Point3& pw, const gtsam::Point2& measured, + Eigen::Ref Dpose, + Eigen::Ref Dpoint, + Eigen::Ref Dcal); + double range(const gtsam::Point3& point); double range(const gtsam::Point3& point, Eigen::Ref Dcamera, Eigen::Ref Dpoint); @@ -914,7 +959,74 @@ class PinholeCamera { // enabling serialization functionality void serialize() const; }; - + +// Forward declaration of PinholeCameraCalX is defined here. 
+#include +// Some typedefs for common camera types +// PinholeCameraCal3_S2 is the same as SimpleCamera above +typedef gtsam::PinholeCamera PinholeCameraCal3_S2; +typedef gtsam::PinholeCamera PinholeCameraCal3DS2; +typedef gtsam::PinholeCamera PinholeCameraCal3Unified; +typedef gtsam::PinholeCamera PinholeCameraCal3Bundler; +typedef gtsam::PinholeCamera PinholeCameraCal3Fisheye; + +#include +template +class PinholePose { + // Standard Constructors and Named Constructors + PinholePose(); + PinholePose(const This other); + PinholePose(const gtsam::Pose3& pose); + PinholePose(const gtsam::Pose3& pose, const CALIBRATION* K); + static This Level(const gtsam::Pose2& pose, double height); + static This Lookat(const gtsam::Point3& eye, const gtsam::Point3& target, + const gtsam::Point3& upVector, const CALIBRATION* K); + + // Testable + void print(string s = "PinholePose") const; + bool equals(const This& camera, double tol) const; + + // Standard Interface + gtsam::Pose3 pose() const; + CALIBRATION calibration() const; + + // Manifold + This retract(Vector d) const; + Vector localCoordinates(const This& T2) const; + size_t dim() const; + static size_t Dim(); + + // Transformations and measurement functions + static gtsam::Point2 Project(const gtsam::Point3& cameraPoint); + pair projectSafe(const gtsam::Point3& pw) const; + gtsam::Point2 project(const gtsam::Point3& point); + gtsam::Point2 project(const gtsam::Point3& point, + Eigen::Ref Dpose, + Eigen::Ref Dpoint, + Eigen::Ref Dcal); + gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const; + gtsam::Point3 backproject(const gtsam::Point2& p, double depth, + Eigen::Ref Dresult_dpose, + Eigen::Ref Dresult_dp, + Eigen::Ref Dresult_ddepth, + Eigen::Ref Dresult_dcal); + double range(const gtsam::Point3& point); + double range(const gtsam::Point3& point, Eigen::Ref Dcamera, + Eigen::Ref Dpoint); + double range(const gtsam::Pose3& pose); + double range(const gtsam::Pose3& pose, Eigen::Ref Dcamera, + Eigen::Ref 
Dpose); + + // enabling serialization functionality + void serialize() const; +}; + +typedef gtsam::PinholePose PinholePoseCal3_S2; +typedef gtsam::PinholePose PinholePoseCal3DS2; +typedef gtsam::PinholePose PinholePoseCal3Unified; +typedef gtsam::PinholePose PinholePoseCal3Bundler; +typedef gtsam::PinholePose PinholePoseCal3Fisheye; + #include class Similarity2 { // Standard Constructors @@ -961,16 +1073,6 @@ class Similarity3 { double scale() const; }; -// Forward declaration of PinholeCameraCalX is defined here. -#include -// Some typedefs for common camera types -// PinholeCameraCal3_S2 is the same as SimpleCamera above -typedef gtsam::PinholeCamera PinholeCameraCal3_S2; -typedef gtsam::PinholeCamera PinholeCameraCal3DS2; -typedef gtsam::PinholeCamera PinholeCameraCal3Unified; -typedef gtsam::PinholeCamera PinholeCameraCal3Bundler; -typedef gtsam::PinholeCamera PinholeCameraCal3Fisheye; - template class CameraSet { CameraSet(); @@ -1019,58 +1121,154 @@ class StereoCamera { }; #include +class TriangulationResult { + enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT }; + Status status; + TriangulationResult(const gtsam::Point3& p); + const gtsam::Point3& get() const; + static TriangulationResult Degenerate(); + static TriangulationResult Outlier(); + static TriangulationResult FarPoint(); + static TriangulationResult BehindCamera(); + bool valid() const; + bool degenerate() const; + bool outlier() const; + bool farPoint() const; + bool behindCamera() const; +}; + +class TriangulationParameters { + double rankTolerance; + bool enableEPI; + double landmarkDistanceThreshold; + double dynamicOutlierRejectionThreshold; + SharedNoiseModel noiseModel; + TriangulationParameters(const double rankTolerance = 1.0, + const bool enableEPI = false, + double landmarkDistanceThreshold = -1, + double dynamicOutlierRejectionThreshold = -1, + const gtsam::SharedNoiseModel& noiseModel = nullptr); +}; // Templates appear not yet supported for free functions - issue 
raised at // borglab/wrap#14 to add support + +// Cal3_S2 versions gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, gtsam::Cal3_S2* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize, const gtsam::SharedNoiseModel& model = nullptr); -gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, - gtsam::Cal3DS2* sharedCal, - const gtsam::Point2Vector& measurements, - double rank_tol, bool optimize, - const gtsam::SharedNoiseModel& model = nullptr); -gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, - gtsam::Cal3Bundler* sharedCal, - const gtsam::Point2Vector& measurements, - double rank_tol, bool optimize, - const gtsam::SharedNoiseModel& model = nullptr); gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras, + const gtsam::Point2Vector& measurements, + double rank_tol, bool optimize, + const gtsam::SharedNoiseModel& model = nullptr, + const bool useLOST = false); +gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, + gtsam::Cal3_S2* sharedCal, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::TriangulationResult triangulateSafe( + const gtsam::CameraSetCal3_S2& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::TriangulationParameters& params); + +// Cal3DS2 versions +gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, + gtsam::Cal3DS2* sharedCal, + const gtsam::Point2Vector& measurements, + double rank_tol, bool optimize, + const gtsam::SharedNoiseModel& model = nullptr); +gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3DS2& cameras, + const gtsam::Point2Vector& measurements, + double rank_tol, bool optimize, + const gtsam::SharedNoiseModel& model = nullptr, + const bool useLOST = false); +gtsam::Point3 
triangulateNonlinear(const gtsam::Pose3Vector& poses, + gtsam::Cal3DS2* sharedCal, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3DS2& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::TriangulationResult triangulateSafe( + const gtsam::CameraSetCal3DS2& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::TriangulationParameters& params); + +// Cal3Bundler versions +gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, + gtsam::Cal3Bundler* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize, const gtsam::SharedNoiseModel& model = nullptr); gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras, + const gtsam::Point2Vector& measurements, + double rank_tol, bool optimize, + const gtsam::SharedNoiseModel& model = nullptr, + const bool useLOST = false); +gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, + gtsam::Cal3Bundler* sharedCal, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::TriangulationResult triangulateSafe( + const gtsam::CameraSetCal3Bundler& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::TriangulationParameters& params); + +// Cal3Fisheye versions +gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, + gtsam::Cal3Fisheye* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize, const gtsam::SharedNoiseModel& model = nullptr); gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras, + const gtsam::Point2Vector& measurements, + double rank_tol, bool optimize, + const gtsam::SharedNoiseModel& model = nullptr, + const bool useLOST = 
false); +gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, + gtsam::Cal3Fisheye* sharedCal, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Fisheye& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::Point3& initialEstimate); +gtsam::TriangulationResult triangulateSafe( + const gtsam::CameraSetCal3Fisheye& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::TriangulationParameters& params); + +// Cal3Unified versions +gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, + gtsam::Cal3Unified* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize, const gtsam::SharedNoiseModel& model = nullptr); gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize, - const gtsam::SharedNoiseModel& model = nullptr); + const gtsam::SharedNoiseModel& model = nullptr, + const bool useLOST = false); gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, - gtsam::Cal3_S2* sharedCal, + gtsam::Cal3Unified* sharedCal, const gtsam::Point2Vector& measurements, const gtsam::Point3& initialEstimate); -gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, - gtsam::Cal3DS2* sharedCal, - const gtsam::Point2Vector& measurements, - const gtsam::Point3& initialEstimate); -gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, - gtsam::Cal3Bundler* sharedCal, - const gtsam::Point2Vector& measurements, - const gtsam::Point3& initialEstimate); -gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras, - const gtsam::Point2Vector& measurements, - const gtsam::Point3& initialEstimate); -gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras, +gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Unified& cameras, const gtsam::Point2Vector& 
measurements, const gtsam::Point3& initialEstimate); +gtsam::TriangulationResult triangulateSafe( + const gtsam::CameraSetCal3Unified& cameras, + const gtsam::Point2Vector& measurements, + const gtsam::TriangulationParameters& params); + + #include template @@ -1086,5 +1284,11 @@ class BearingRange { typedef gtsam::BearingRange BearingRange2D; +typedef gtsam::BearingRange + BearingRangePose2; +typedef gtsam::BearingRange + BearingRange3D; +typedef gtsam::BearingRange + BearingRangePose3; } // namespace gtsam diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index 315391ac8..e373e1d52 100644 --- a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -19,8 +19,6 @@ #include #include -#include - using namespace std::placeholders; using namespace gtsam; diff --git a/gtsam/geometry/tests/testTriangulation.cpp b/gtsam/geometry/tests/testTriangulation.cpp index fb66fb6a2..bc843ad75 100644 --- a/gtsam/geometry/tests/testTriangulation.cpp +++ b/gtsam/geometry/tests/testTriangulation.cpp @@ -15,6 +15,7 @@ * @date July 30th, 2013 * @author Chris Beall (cbeall3) * @author Luca Carlone + * @author Akshay Krishnan */ #include @@ -38,24 +39,24 @@ using namespace boost::assign; // Some common constants -static const boost::shared_ptr sharedCal = // +static const boost::shared_ptr kSharedCal = // boost::make_shared(1500, 1200, 0.1, 640, 480); // Looking along X-axis, 1 meter above ground plane (x-y) static const Rot3 upright = Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2); -static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1)); -PinholeCamera camera1(pose1, *sharedCal); +static const Pose3 kPose1 = Pose3(upright, gtsam::Point3(0, 0, 1)); +static const PinholeCamera kCamera1(kPose1, *kSharedCal); // create second camera 1 meter to the right of first camera -static const Pose3 pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0)); -PinholeCamera camera2(pose2, *sharedCal); +static const Pose3 kPose2 = kPose1 * Pose3(Rot3(), 
Point3(1, 0, 0)); +static const PinholeCamera kCamera2(kPose2, *kSharedCal); // landmark ~5 meters infront of camera -static const Point3 landmark(5, 0.5, 1.2); +static const Point3 kLandmark(5, 0.5, 1.2); // 1. Project two landmarks into two cameras and triangulate -Point2 z1 = camera1.project(landmark); -Point2 z2 = camera2.project(landmark); +static const Point2 kZ1 = kCamera1.project(kLandmark); +static const Point2 kZ2 = kCamera2.project(kLandmark); //****************************************************************************** // Simple test with a well-behaved two camera situation @@ -63,22 +64,22 @@ TEST(triangulation, twoPoses) { vector poses; Point2Vector measurements; - poses += pose1, pose2; - measurements += z1, z2; + poses += kPose1, kPose2; + measurements += kZ1, kZ2; double rank_tol = 1e-9; // 1. Test simple DLT, perfect in no noise situation bool optimize = false; boost::optional actual1 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual1, 1e-7)); + triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. test with optimization on, same answer optimize = true; boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual2, 1e-7)); + triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); + EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); // 3. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -86,16 +87,79 @@ TEST(triangulation, twoPoses) { measurements.at(1) += Point2(-0.2, 0.3); optimize = false; boost::optional actual3 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4)); // 4. 
Now with optimization on optimize = true; boost::optional actual4 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + triangulatePoint3(poses, kSharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4)); } +TEST(triangulation, twoCamerasUsingLOST) { + CameraSet> cameras; + cameras.push_back(kCamera1); + cameras.push_back(kCamera2); + + Point2Vector measurements = {kZ1, kZ2}; + SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4); + double rank_tol = 1e-9; + + // 1. Test simple triangulation, perfect in no noise situation + boost::optional actual1 = // + triangulatePoint3>(cameras, measurements, rank_tol, + /*optimize=*/false, + measurementNoise, + /*useLOST=*/true); + EXPECT(assert_equal(kLandmark, *actual1, 1e-12)); + + // 3. Add some noise and try again: result should be ~ (4.995, + // 0.499167, 1.19814) + measurements[0] += Point2(0.1, 0.5); + measurements[1] += Point2(-0.2, 0.3); + boost::optional actual2 = // + triangulatePoint3>( + cameras, measurements, rank_tol, + /*optimize=*/false, measurementNoise, + /*use_lost_triangulation=*/true); + EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual2, 1e-4)); +} + +TEST(triangulation, twoCamerasLOSTvsDLT) { + // LOST has been shown to preform better when the point is much closer to one + // camera compared to another. This unit test checks this configuration. 
+ const Cal3_S2 identityK; + const Pose3 pose1; + const Pose3 pose2(Rot3(), Point3(5., 0., -5.)); + CameraSet> cameras; + cameras.emplace_back(pose1, identityK); + cameras.emplace_back(pose2, identityK); + + Point3 landmark(0, 0, 1); + Point2 x1_noisy = cameras[0].project(landmark) + Point2(0.00817, 0.00977); + Point2 x2_noisy = cameras[1].project(landmark) + Point2(-0.00610, 0.01969); + Point2Vector measurements = {x1_noisy, x2_noisy}; + + const double rank_tol = 1e-9; + SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2); + + boost::optional estimateLOST = // + triangulatePoint3>(cameras, measurements, rank_tol, + /*optimize=*/false, + measurementNoise, + /*useLOST=*/true); + + // These values are from a MATLAB implementation. + EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), *estimateLOST, 1e-3)); + + boost::optional estimateDLT = + triangulatePoint3(cameras, measurements, rank_tol, false); + + // The LOST estimate should have a smaller error. + EXPECT((landmark - *estimateLOST).norm() <= (landmark - *estimateDLT).norm()); +} + //****************************************************************************** // Simple test with a well-behaved two camera situation with Cal3DS2 calibration. TEST(triangulation, twoPosesCal3DS2) { @@ -103,18 +167,18 @@ TEST(triangulation, twoPosesCal3DS2) { boost::make_shared(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003); - PinholeCamera camera1Distorted(pose1, *sharedDistortedCal); + PinholeCamera camera1Distorted(kPose1, *sharedDistortedCal); - PinholeCamera camera2Distorted(pose2, *sharedDistortedCal); + PinholeCamera camera2Distorted(kPose2, *sharedDistortedCal); // 0. 
Project two landmarks into two cameras and triangulate - Point2 z1Distorted = camera1Distorted.project(landmark); - Point2 z2Distorted = camera2Distorted.project(landmark); + Point2 z1Distorted = camera1Distorted.project(kLandmark); + Point2 z2Distorted = camera2Distorted.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose2; + poses += kPose1, kPose2; measurements += z1Distorted, z2Distorted; double rank_tol = 1e-9; @@ -124,14 +188,14 @@ TEST(triangulation, twoPosesCal3DS2) { boost::optional actual1 = // triangulatePoint3(poses, sharedDistortedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual1, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. test with optimization on, same answer optimize = true; boost::optional actual2 = // triangulatePoint3(poses, sharedDistortedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual2, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); // 3. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -160,18 +224,18 @@ TEST(triangulation, twoPosesFisheye) { boost::make_shared(1500, 1200, .1, 640, 480, -.3, 0.1, 0.0001, -0.0003); - PinholeCamera camera1Distorted(pose1, *sharedDistortedCal); + PinholeCamera camera1Distorted(kPose1, *sharedDistortedCal); - PinholeCamera camera2Distorted(pose2, *sharedDistortedCal); + PinholeCamera camera2Distorted(kPose2, *sharedDistortedCal); // 0. 
Project two landmarks into two cameras and triangulate - Point2 z1Distorted = camera1Distorted.project(landmark); - Point2 z2Distorted = camera2Distorted.project(landmark); + Point2 z1Distorted = camera1Distorted.project(kLandmark); + Point2 z2Distorted = camera2Distorted.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose2; + poses += kPose1, kPose2; measurements += z1Distorted, z2Distorted; double rank_tol = 1e-9; @@ -181,14 +245,14 @@ TEST(triangulation, twoPosesFisheye) { boost::optional actual1 = // triangulatePoint3(poses, sharedDistortedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual1, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 2. test with optimization on, same answer optimize = true; boost::optional actual2 = // triangulatePoint3(poses, sharedDistortedCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual2, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); // 3. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -213,17 +277,17 @@ TEST(triangulation, twoPosesFisheye) { TEST(triangulation, twoPosesBundler) { boost::shared_ptr bundlerCal = // boost::make_shared(1500, 0.1, 0.2, 640, 480); - PinholeCamera camera1(pose1, *bundlerCal); - PinholeCamera camera2(pose2, *bundlerCal); + PinholeCamera camera1(kPose1, *bundlerCal); + PinholeCamera camera2(kPose2, *bundlerCal); // 1. 
Project two landmarks into two cameras and triangulate - Point2 z1 = camera1.project(landmark); - Point2 z2 = camera2.project(landmark); + Point2 z1 = camera1.project(kLandmark); + Point2 z2 = camera2.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose2; + poses += kPose1, kPose2; measurements += z1, z2; bool optimize = true; @@ -232,7 +296,7 @@ TEST(triangulation, twoPosesBundler) { boost::optional actual = // triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual, 1e-7)); // Add some noise and try again measurements.at(0) += Point2(0.1, 0.5); @@ -249,12 +313,12 @@ TEST(triangulation, fourPoses) { vector poses; Point2Vector measurements; - poses += pose1, pose2; - measurements += z1, z2; + poses += kPose1, kPose2; + measurements += kZ1, kZ2; boost::optional actual = - triangulatePoint3(poses, sharedCal, measurements); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); // 2. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -262,37 +326,37 @@ TEST(triangulation, fourPoses) { measurements.at(1) += Point2(-0.2, 0.3); boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements); - EXPECT(assert_equal(landmark, *actual2, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements); + EXPECT(assert_equal(kLandmark, *actual2, 1e-2)); // 3. 
Add a slightly rotated third camera above, again with measurement noise - Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); - PinholeCamera camera3(pose3, *sharedCal); - Point2 z3 = camera3.project(landmark); + Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + PinholeCamera camera3(pose3, *kSharedCal); + Point2 z3 = camera3.project(kLandmark); poses += pose3; measurements += z3 + Point2(0.1, -0.1); boost::optional triangulated_3cameras = // - triangulatePoint3(poses, sharedCal, measurements); - EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements); + EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization boost::optional triangulated_3cameras_opt = - triangulatePoint3(poses, sharedCal, measurements, 1e-9, true); - EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements, 1e-9, true); + EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2)); // 4. 
Test failure: Add a 4th camera facing the wrong way Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1)); - PinholeCamera camera4(pose4, *sharedCal); + PinholeCamera camera4(pose4, *kSharedCal); #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION - CHECK_EXCEPTION(camera4.project(landmark), CheiralityException); + CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException); poses += pose4; measurements += Point2(400, 400); - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationCheiralityException); #endif } @@ -300,33 +364,33 @@ TEST(triangulation, fourPoses) { //****************************************************************************** TEST(triangulation, threePoses_robustNoiseModel) { - Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); - PinholeCamera camera3(pose3, *sharedCal); - Point2 z3 = camera3.project(landmark); + Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + PinholeCamera camera3(pose3, *kSharedCal); + Point2 z3 = camera3.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose2, pose3; - measurements += z1, z2, z3; + poses += kPose1, kPose2, pose3; + measurements += kZ1, kZ2, z3; // noise free, so should give exactly the landmark boost::optional actual = - triangulatePoint3(poses, sharedCal, measurements); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); // Add outlier measurements.at(0) += Point2(100, 120); // very large pixel noise! 
// now estimate does not match landmark boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements); + triangulatePoint3(poses, kSharedCal, measurements); // DLT is surprisingly robust, but still off (actual error is around 0.26m): - EXPECT( (landmark - *actual2).norm() >= 0.2); - EXPECT( (landmark - *actual2).norm() <= 0.5); + EXPECT( (kLandmark - *actual2).norm() >= 0.2); + EXPECT( (kLandmark - *actual2).norm() <= 0.5); // Again with nonlinear optimization boost::optional actual3 = - triangulatePoint3(poses, sharedCal, measurements, 1e-9, true); + triangulatePoint3(poses, kSharedCal, measurements, 1e-9, true); // result from nonlinear (but non-robust optimization) is close to DLT and still off EXPECT(assert_equal(*actual2, *actual3, 0.1)); @@ -334,27 +398,27 @@ TEST(triangulation, threePoses_robustNoiseModel) { auto model = noiseModel::Robust::Create( noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2)); boost::optional actual4 = triangulatePoint3( - poses, sharedCal, measurements, 1e-9, true, model); + poses, kSharedCal, measurements, 1e-9, true, model); // using the Huber loss we now have a quite small error!! nice! 
- EXPECT(assert_equal(landmark, *actual4, 0.05)); + EXPECT(assert_equal(kLandmark, *actual4, 0.05)); } //****************************************************************************** TEST(triangulation, fourPoses_robustNoiseModel) { - Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); - PinholeCamera camera3(pose3, *sharedCal); - Point2 z3 = camera3.project(landmark); + Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + PinholeCamera camera3(pose3, *kSharedCal); + Point2 z3 = camera3.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose1, pose2, pose3; // 2 measurements from pose 1 - measurements += z1, z1, z2, z3; + poses += kPose1, kPose1, kPose2, pose3; // 2 measurements from pose 1 + measurements += kZ1, kZ1, kZ2, z3; // noise free, so should give exactly the landmark boost::optional actual = - triangulatePoint3(poses, sharedCal, measurements); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + triangulatePoint3(poses, kSharedCal, measurements); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); // Add outlier measurements.at(0) += Point2(100, 120); // very large pixel noise! 
@@ -365,14 +429,14 @@ TEST(triangulation, fourPoses_robustNoiseModel) { // now estimate does not match landmark boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements); + triangulatePoint3(poses, kSharedCal, measurements); // DLT is surprisingly robust, but still off (actual error is around 0.17m): - EXPECT( (landmark - *actual2).norm() >= 0.1); - EXPECT( (landmark - *actual2).norm() <= 0.5); + EXPECT( (kLandmark - *actual2).norm() >= 0.1); + EXPECT( (kLandmark - *actual2).norm() <= 0.5); // Again with nonlinear optimization boost::optional actual3 = - triangulatePoint3(poses, sharedCal, measurements, 1e-9, true); + triangulatePoint3(poses, kSharedCal, measurements, 1e-9, true); // result from nonlinear (but non-robust optimization) is close to DLT and still off EXPECT(assert_equal(*actual2, *actual3, 0.1)); @@ -380,24 +444,24 @@ TEST(triangulation, fourPoses_robustNoiseModel) { auto model = noiseModel::Robust::Create( noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2)); boost::optional actual4 = triangulatePoint3( - poses, sharedCal, measurements, 1e-9, true, model); + poses, kSharedCal, measurements, 1e-9, true, model); // using the Huber loss we now have a quite small error!! nice! - EXPECT(assert_equal(landmark, *actual4, 0.05)); + EXPECT(assert_equal(kLandmark, *actual4, 0.05)); } //****************************************************************************** TEST(triangulation, fourPoses_distinct_Ks) { Cal3_S2 K1(1500, 1200, 0, 640, 480); // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - PinholeCamera camera1(pose1, K1); + PinholeCamera camera1(kPose1, K1); // create second camera 1 meter to the right of first camera Cal3_S2 K2(1600, 1300, 0, 650, 440); - PinholeCamera camera2(pose2, K2); + PinholeCamera camera2(kPose2, K2); // 1. 
Project two landmarks into two cameras and triangulate - Point2 z1 = camera1.project(landmark); - Point2 z2 = camera2.project(landmark); + Point2 z1 = camera1.project(kLandmark); + Point2 z2 = camera2.project(kLandmark); CameraSet> cameras; Point2Vector measurements; @@ -407,7 +471,7 @@ TEST(triangulation, fourPoses_distinct_Ks) { boost::optional actual = // triangulatePoint3(cameras, measurements); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); // 2. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -416,25 +480,25 @@ TEST(triangulation, fourPoses_distinct_Ks) { boost::optional actual2 = // triangulatePoint3(cameras, measurements); - EXPECT(assert_equal(landmark, *actual2, 1e-2)); + EXPECT(assert_equal(kLandmark, *actual2, 1e-2)); // 3. Add a slightly rotated third camera above, again with measurement noise - Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); Cal3_S2 K3(700, 500, 0, 640, 480); PinholeCamera camera3(pose3, K3); - Point2 z3 = camera3.project(landmark); + Point2 z3 = camera3.project(kLandmark); cameras += camera3; measurements += z3 + Point2(0.1, -0.1); boost::optional triangulated_3cameras = // triangulatePoint3(cameras, measurements); - EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); + EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization boost::optional triangulated_3cameras_opt = triangulatePoint3(cameras, measurements, 1e-9, true); - EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); + EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2)); // 4. 
Test failure: Add a 4th camera facing the wrong way Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1)); @@ -442,7 +506,7 @@ TEST(triangulation, fourPoses_distinct_Ks) { PinholeCamera camera4(pose4, K4); #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION - CHECK_EXCEPTION(camera4.project(landmark), CheiralityException); + CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException); cameras += camera4; measurements += Point2(400, 400); @@ -455,15 +519,15 @@ TEST(triangulation, fourPoses_distinct_Ks) { TEST(triangulation, fourPoses_distinct_Ks_distortion) { Cal3DS2 K1(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003); // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - PinholeCamera camera1(pose1, K1); + PinholeCamera camera1(kPose1, K1); // create second camera 1 meter to the right of first camera Cal3DS2 K2(1600, 1300, 0, 650, 440, -.2, 0.05, 0.0002, -0.0001); - PinholeCamera camera2(pose2, K2); + PinholeCamera camera2(kPose2, K2); // 1. Project two landmarks into two cameras and triangulate - Point2 z1 = camera1.project(landmark); - Point2 z2 = camera2.project(landmark); + Point2 z1 = camera1.project(kLandmark); + Point2 z2 = camera2.project(kLandmark); CameraSet> cameras; Point2Vector measurements; @@ -473,22 +537,22 @@ TEST(triangulation, fourPoses_distinct_Ks_distortion) { boost::optional actual = // triangulatePoint3(cameras, measurements); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); } //****************************************************************************** TEST(triangulation, outliersAndFarLandmarks) { Cal3_S2 K1(1500, 1200, 0, 640, 480); // create first camera. 
Looking along X-axis, 1 meter above ground plane (x-y) - PinholeCamera camera1(pose1, K1); + PinholeCamera camera1(kPose1, K1); // create second camera 1 meter to the right of first camera Cal3_S2 K2(1600, 1300, 0, 650, 440); - PinholeCamera camera2(pose2, K2); + PinholeCamera camera2(kPose2, K2); // 1. Project two landmarks into two cameras and triangulate - Point2 z1 = camera1.project(landmark); - Point2 z2 = camera2.project(landmark); + Point2 z1 = camera1.project(kLandmark); + Point2 z2 = camera2.project(kLandmark); CameraSet> cameras; Point2Vector measurements; @@ -501,7 +565,7 @@ TEST(triangulation, outliersAndFarLandmarks) { 1.0, false, landmarkDistanceThreshold); // all default except // landmarkDistanceThreshold TriangulationResult actual = triangulateSafe(cameras, measurements, params); - EXPECT(assert_equal(landmark, *actual, 1e-2)); + EXPECT(assert_equal(kLandmark, *actual, 1e-2)); EXPECT(actual.valid()); landmarkDistanceThreshold = 4; // landmark is farther than that @@ -513,10 +577,10 @@ TEST(triangulation, outliersAndFarLandmarks) { // 3. Add a slightly rotated third camera above with a wrong measurement // (OUTLIER) - Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); + Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); Cal3_S2 K3(700, 500, 0, 640, 480); PinholeCamera camera3(pose3, K3); - Point2 z3 = camera3.project(landmark); + Point2 z3 = camera3.project(kLandmark); cameras += camera3; measurements += z3 + Point2(10, -10); @@ -539,18 +603,18 @@ TEST(triangulation, outliersAndFarLandmarks) { //****************************************************************************** TEST(triangulation, twoIdenticalPoses) { // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) - PinholeCamera camera1(pose1, *sharedCal); + PinholeCamera camera1(kPose1, *kSharedCal); // 1. 
Project two landmarks into two cameras and triangulate - Point2 z1 = camera1.project(landmark); + Point2 z1 = camera1.project(kLandmark); vector poses; Point2Vector measurements; - poses += pose1, pose1; + poses += kPose1, kPose1; measurements += z1, z1; - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationUnderconstrainedException); } @@ -565,7 +629,7 @@ TEST(triangulation, onePose) { poses += Pose3(); measurements += Point2(0, 0); - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + CHECK_EXCEPTION(triangulatePoint3(poses, kSharedCal, measurements), TriangulationUnderconstrainedException); } @@ -681,12 +745,12 @@ TEST(triangulation, twoPoses_sphericalCamera) { std::vector measurements; // Project landmark into two cameras and triangulate - SphericalCamera cam1(pose1); - SphericalCamera cam2(pose2); - Unit3 u1 = cam1.project(landmark); - Unit3 u2 = cam2.project(landmark); + SphericalCamera cam1(kPose1); + SphericalCamera cam2(kPose2); + Unit3 u1 = cam1.project(kLandmark); + Unit3 u2 = cam2.project(kLandmark); - poses += pose1, pose2; + poses += kPose1, kPose2; measurements += u1, u2; CameraSet cameras; @@ -698,25 +762,25 @@ TEST(triangulation, twoPoses_sphericalCamera) { // 1. Test linear triangulation via DLT auto projection_matrices = projectionMatricesFromCameras(cameras); Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol); - EXPECT(assert_equal(landmark, point, 1e-7)); + EXPECT(assert_equal(kLandmark, point, 1e-7)); // 2. Test nonlinear triangulation point = triangulateNonlinear(cameras, measurements, point); - EXPECT(assert_equal(landmark, point, 1e-7)); + EXPECT(assert_equal(kLandmark, point, 1e-7)); // 3. 
Test simple DLT, now within triangulatePoint3 bool optimize = false; boost::optional actual1 = // triangulatePoint3(cameras, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual1, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual1, 1e-7)); // 4. test with optimization on, same answer optimize = true; boost::optional actual2 = // triangulatePoint3(cameras, measurements, rank_tol, optimize); - EXPECT(assert_equal(landmark, *actual2, 1e-7)); + EXPECT(assert_equal(kLandmark, *actual2, 1e-7)); // 5. Add some noise and try again: result should be ~ (4.995, // 0.499167, 1.19814) @@ -761,7 +825,7 @@ TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) { EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2, 1e-7)); // behind and to the right of PoseB - poses += pose1, pose2; + poses += kPose1, kPose2; measurements += u1, u2; CameraSet cameras; diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index 026afef24..acb0ddf0e 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -14,6 +14,7 @@ * @brief Functions for triangulation * @date July 31, 2013 * @author Chris Beall + * @author Akshay Krishnan */ #include @@ -24,9 +25,9 @@ namespace gtsam { Vector4 triangulateHomogeneousDLT( - const std::vector>& projection_matrices, + const std::vector>& + projection_matrices, const Point2Vector& measurements, double rank_tol) { - // number of cameras size_t m = projection_matrices.size(); @@ -39,6 +40,11 @@ Vector4 triangulateHomogeneousDLT( const Point2& p = measurements.at(i); // build system of equations + // [A_1; A_2; A_3] x = [b_1; b_2; b_3] + // [b_3 * A_1 - b_1 * A_3] x = 0 + // [b_3 * A_2 - b_2 * A_3] x = 0 + // A' x = 0 + // A' 2x4 = [b_3 * A_1 - b_1 * A_3; b_3 * A_2 - b_2 * A_3] A.row(row) = p.x() * projection.row(2) - projection.row(0); A.row(row + 1) = p.y() * projection.row(2) - projection.row(1); } @@ -47,16 +53,15 @@ Vector4 triangulateHomogeneousDLT( Vector v; boost::tie(rank, 
error, v) = DLT(A, rank_tol); - if (rank < 3) - throw(TriangulationUnderconstrainedException()); + if (rank < 3) throw(TriangulationUnderconstrainedException()); return v; } Vector4 triangulateHomogeneousDLT( - const std::vector>& projection_matrices, + const std::vector>& + projection_matrices, const std::vector& measurements, double rank_tol) { - // number of cameras size_t m = projection_matrices.size(); @@ -66,7 +71,9 @@ Vector4 triangulateHomogeneousDLT( for (size_t i = 0; i < m; i++) { size_t row = i * 2; const Matrix34& projection = projection_matrices.at(i); - const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector + const Point3& p = + measurements.at(i) + .point3(); // to get access to x,y,z of the bearing vector // build system of equations A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0); @@ -77,34 +84,67 @@ Vector4 triangulateHomogeneousDLT( Vector v; boost::tie(rank, error, v) = DLT(A, rank_tol); - if (rank < 3) - throw(TriangulationUnderconstrainedException()); + if (rank < 3) throw(TriangulationUnderconstrainedException()); return v; } -Point3 triangulateDLT( - const std::vector>& projection_matrices, - const Point2Vector& measurements, double rank_tol) { +Point3 triangulateLOST(const std::vector& poses, + const Point3Vector& calibratedMeasurements, + const SharedIsotropic& measurementNoise) { + size_t m = calibratedMeasurements.size(); + assert(m == poses.size()); - Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, - rank_tol); + // Construct the system matrices. + Matrix A = Matrix::Zero(m * 2, 3); + Matrix b = Matrix::Zero(m * 2, 1); + + for (size_t i = 0; i < m; i++) { + const Pose3& wTi = poses[i]; + // TODO(akshay-krishnan): are there better ways to select j? 
+ const int j = (i + 1) % m; + const Pose3& wTj = poses[j]; + + const Point3 d_ij = wTj.translation() - wTi.translation(); + + const Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]); + const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]); + + // Note: Setting q_i = 1.0 gives same results as DLT. + const double q_i = wZi.cross(wZj).norm() / + (measurementNoise->sigma() * d_ij.cross(wZj).norm()); + + const Matrix23 coefficientMat = + q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) * + wTi.rotation().matrix().transpose(); + + A.block<2, 3>(2 * i, 0) << coefficientMat; + b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation(); + } + return A.colPivHouseholderQr().solve(b); +} + +Point3 triangulateDLT( + const std::vector>& + projection_matrices, + const Point2Vector& measurements, double rank_tol) { + Vector4 v = + triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } Point3 triangulateDLT( - const std::vector>& projection_matrices, + const std::vector>& + projection_matrices, const std::vector& measurements, double rank_tol) { - // contrary to previous triangulateDLT, this is now taking Unit3 inputs - Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, - rank_tol); - // Create 3D point from homogeneous coordinates - return Point3(v.head<3>() / v[3]); + Vector4 v = + triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); + // Create 3D point from homogeneous coordinates + return Point3(v.head<3>() / v[3]); } -/// /** * Optimize for triangulation * @param graph nonlinear factors for projection @@ -132,4 +172,4 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values, return result.at(landmarkKey); } -} // \namespace gtsam +} // namespace gtsam diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 49b5aef04..e0473f8c3 100644 --- 
a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -15,6 +15,7 @@ * @date July 31, 2013 * @author Chris Beall * @author Luca Carlone + * @author Akshay Krishnan */ #pragma once @@ -23,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +95,20 @@ GTSAM_EXPORT Point3 triangulateDLT( const std::vector& measurements, double rank_tol = 1e-9); +/** + * @brief Triangulation using the LOST (Linear Optimal Sine Triangulation) + * algorithm proposed in https://arxiv.org/pdf/2205.12197.pdf by Sebastien Henry + * and John Christian. + * @param poses camera poses in world frame + * @param calibratedMeasurements measurements in homogeneous coordinates in each + * camera pose + * @param measurementNoise isotropic noise model for the measurements + * @return triangulated point in world coordinates + */ +GTSAM_EXPORT Point3 triangulateLOST(const std::vector& poses, + const Point3Vector& calibratedMeasurements, + const SharedIsotropic& measurementNoise); + /** * Create a factor graph with projection factors from poses and one calibration * @param poses Camera poses @@ -107,17 +123,16 @@ std::pair triangulationGraph( const std::vector& poses, boost::shared_ptr sharedCal, const Point2Vector& measurements, Key landmarkKey, const Point3& initialEstimate, - const SharedNoiseModel& model = nullptr) { + const SharedNoiseModel& model = noiseModel::Unit::Create(2)) { Values values; values.insert(landmarkKey, initialEstimate); // Initial landmark value NonlinearFactorGraph graph; - static SharedNoiseModel unit2(noiseModel::Unit::Create(2)); for (size_t i = 0; i < measurements.size(); i++) { const Pose3& pose_i = poses[i]; typedef PinholePose Camera; Camera camera_i(pose_i, sharedCal); graph.emplace_shared > // - (camera_i, measurements[i], model? 
model : unit2, landmarkKey); + (camera_i, measurements[i], model, landmarkKey); } return std::make_pair(graph, values); } @@ -301,10 +316,10 @@ template typename CAMERA::MeasurementVector undistortMeasurements( const CameraSet& cameras, const typename CAMERA::MeasurementVector& measurements) { - const size_t num_meas = cameras.size(); - assert(num_meas == measurements.size()); - typename CAMERA::MeasurementVector undistortedMeasurements(num_meas); - for (size_t ii = 0; ii < num_meas; ++ii) { + const size_t nrMeasurements = measurements.size(); + assert(nrMeasurements == cameras.size()); + typename CAMERA::MeasurementVector undistortedMeasurements(nrMeasurements); + for (size_t ii = 0; ii < nrMeasurements; ++ii) { // Calibrate with cal and uncalibrate with pinhole version of cal so that // measurements are undistorted. undistortedMeasurements[ii] = @@ -330,6 +345,65 @@ inline SphericalCamera::MeasurementVector undistortMeasurements( return measurements; } +/** Convert pixel measurements in image to homogeneous measurements in the image + * plane using shared camera intrinsics. + * + * @tparam CALIBRATION Calibration type to use. + * @param cal Calibration with which measurements were taken. + * @param measurements Vector of measurements to undistort. + * @return homogeneous measurements in image plane + */ +template +inline Point3Vector calibrateMeasurementsShared( + const CALIBRATION& cal, const Point2Vector& measurements) { + Point3Vector calibratedMeasurements; + // Calibrate with cal and uncalibrate with pinhole version of cal so that + // measurements are undistorted. + std::transform(measurements.begin(), measurements.end(), + std::back_inserter(calibratedMeasurements), + [&cal](const Point2& measurement) { + Point3 p; + p << cal.calibrate(measurement), 1.0; + return p; + }); + return calibratedMeasurements; +} + +/** Convert pixel measurements in image to homogeneous measurements in the image + * plane using camera intrinsics of each measurement. 
+ * + * @tparam CAMERA Camera type to use. + * @param cameras Cameras corresponding to each measurement. + * @param measurements Vector of measurements to undistort. + * @return homogeneous measurements in image plane + */ +template +inline Point3Vector calibrateMeasurements( + const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements) { + const size_t nrMeasurements = measurements.size(); + assert(nrMeasurements == cameras.size()); + Point3Vector calibratedMeasurements(nrMeasurements); + for (size_t ii = 0; ii < nrMeasurements; ++ii) { + calibratedMeasurements[ii] + << cameras[ii].calibration().calibrate(measurements[ii]), + 1.0; + } + return calibratedMeasurements; +} + +/** Specialize for SphericalCamera to do nothing. */ +template +inline Point3Vector calibrateMeasurements( + const CameraSet& cameras, + const SphericalCamera::MeasurementVector& measurements) { + Point3Vector calibratedMeasurements(measurements.size()); + for (size_t ii = 0; ii < measurements.size(); ++ii) { + calibratedMeasurements[ii] << measurements[ii].point3(); + } + return calibratedMeasurements; +} + /** * Function to triangulate 3D landmark point from an arbitrary number * of poses (at least 2) using the DLT. 
The function checks that the @@ -340,41 +414,55 @@ inline SphericalCamera::MeasurementVector undistortMeasurements( * @param measurements A vector of camera measurements * @param rank_tol rank tolerance, default 1e-9 * @param optimize Flag to turn on nonlinear refinement of triangulation + * @param useLOST whether to use the LOST algorithm instead of DLT * @return Returns a Point3 */ -template +template Point3 triangulatePoint3(const std::vector& poses, - boost::shared_ptr sharedCal, - const Point2Vector& measurements, double rank_tol = 1e-9, - bool optimize = false, - const SharedNoiseModel& model = nullptr) { - + boost::shared_ptr sharedCal, + const Point2Vector& measurements, + double rank_tol = 1e-9, bool optimize = false, + const SharedNoiseModel& model = nullptr, + const bool useLOST = false) { assert(poses.size() == measurements.size()); - if (poses.size() < 2) - throw(TriangulationUnderconstrainedException()); - - // construct projection matrices from poses & calibration - auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal); - - // Undistort the measurements, leaving only the pinhole elements in effect. - auto undistortedMeasurements = - undistortMeasurements(*sharedCal, measurements); + if (poses.size() < 2) throw(TriangulationUnderconstrainedException()); // Triangulate linearly - Point3 point = - triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + Point3 point; + if (useLOST) { + // Reduce input noise model to an isotropic noise model using the mean of + // the diagonal. + const double measurementSigma = model ? model->sigmas().mean() : 1e-4; + SharedIsotropic measurementNoise = + noiseModel::Isotropic::Sigma(2, measurementSigma); + // calibrate the measurements to obtain homogenous coordinates in image + // plane. 
+ auto calibratedMeasurements = + calibrateMeasurementsShared(*sharedCal, measurements); + + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise); + } else { + // construct projection matrices from poses & calibration + auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal); + + // Undistort the measurements, leaving only the pinhole elements in effect. + auto undistortedMeasurements = + undistortMeasurements(*sharedCal, measurements); + + point = + triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + } // Then refine using non-linear optimization if (optimize) - point = triangulateNonlinear // + point = triangulateNonlinear // (poses, sharedCal, measurements, point, model); #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION // verify that the triangulated point lies in front of all cameras - for(const Pose3& pose: poses) { + for (const Pose3& pose : poses) { const Point3& p_local = pose.transformTo(point); - if (p_local.z() <= 0) - throw(TriangulationCheiralityException()); + if (p_local.z() <= 0) throw(TriangulationCheiralityException()); } #endif @@ -391,41 +479,63 @@ Point3 triangulatePoint3(const std::vector& poses, * @param measurements A vector of camera measurements * @param rank_tol rank tolerance, default 1e-9 * @param optimize Flag to turn on nonlinear refinement of triangulation + * @param useLOST whether to use the LOST algorithm instead of + * DLT * @return Returns a Point3 */ -template -Point3 triangulatePoint3( - const CameraSet& cameras, - const typename CAMERA::MeasurementVector& measurements, double rank_tol = 1e-9, - bool optimize = false, - const SharedNoiseModel& model = nullptr) { - +template +Point3 triangulatePoint3(const CameraSet& cameras, + const typename CAMERA::MeasurementVector& measurements, + double rank_tol = 1e-9, bool optimize = false, + const SharedNoiseModel& model = nullptr, + const bool useLOST = false) { size_t m = cameras.size(); assert(measurements.size() == m); - if (m < 2) 
- throw(TriangulationUnderconstrainedException()); + if (m < 2) throw(TriangulationUnderconstrainedException()); - // construct projection matrices from poses & calibration - auto projection_matrices = projectionMatricesFromCameras(cameras); + // Triangulate linearly + Point3 point; + if (useLOST) { + // Reduce input noise model to an isotropic noise model using the mean of + // the diagonal. + const double measurementSigma = model ? model->sigmas().mean() : 1e-4; + SharedIsotropic measurementNoise = + noiseModel::Isotropic::Sigma(2, measurementSigma); - // Undistort the measurements, leaving only the pinhole elements in effect. - auto undistortedMeasurements = - undistortMeasurements(cameras, measurements); + // construct poses from cameras. + std::vector poses; + poses.reserve(cameras.size()); + for (const auto& camera : cameras) poses.push_back(camera.pose()); - Point3 point = - triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + // calibrate the measurements to obtain homogenous coordinates in image + // plane. + auto calibratedMeasurements = + calibrateMeasurements(cameras, measurements); - // The n refine using non-linear optimization - if (optimize) + point = triangulateLOST(poses, calibratedMeasurements, measurementNoise); + } else { + // construct projection matrices from poses & calibration + auto projection_matrices = projectionMatricesFromCameras(cameras); + + // Undistort the measurements, leaving only the pinhole elements in effect. 
+ auto undistortedMeasurements = + undistortMeasurements(cameras, measurements); + + point = + triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol); + } + + // Then refine using non-linear optimization + if (optimize) { point = triangulateNonlinear(cameras, measurements, point, model); + } #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION // verify that the triangulated point lies in front of all cameras - for(const CAMERA& camera: cameras) { + for (const CAMERA& camera : cameras) { const Point3& p_local = camera.pose().transformTo(point); - if (p_local.z() <= 0) - throw(TriangulationCheiralityException()); + if (p_local.z() <= 0) throw(TriangulationCheiralityException()); } #endif @@ -433,14 +543,14 @@ Point3 triangulatePoint3( } /// Pinhole-specific version -template -Point3 triangulatePoint3( - const CameraSet >& cameras, - const Point2Vector& measurements, double rank_tol = 1e-9, - bool optimize = false, - const SharedNoiseModel& model = nullptr) { - return triangulatePoint3 > // - (cameras, measurements, rank_tol, optimize, model); +template +Point3 triangulatePoint3(const CameraSet>& cameras, + const Point2Vector& measurements, + double rank_tol = 1e-9, bool optimize = false, + const SharedNoiseModel& model = nullptr, + const bool useLOST = false) { + return triangulatePoint3> // + (cameras, measurements, rank_tol, optimize, model, useLOST); } struct GTSAM_EXPORT TriangulationParameters { @@ -510,18 +620,18 @@ private: }; /** - * TriangulationResult is an optional point, along with the reasons why it is invalid. + * TriangulationResult is an optional point, along with the reasons why it is + * invalid. 
*/ -class TriangulationResult: public boost::optional { - enum Status { - VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT - }; - Status status_; - TriangulationResult(Status s) : - status_(s) { - } -public: +class TriangulationResult : public boost::optional { + public: + enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT }; + Status status; + private: + TriangulationResult(Status s) : status(s) {} + + public: /** * Default constructor, only for serialization */ @@ -530,54 +640,38 @@ public: /** * Constructor */ - TriangulationResult(const Point3& p) : - status_(VALID) { - reset(p); - } + TriangulationResult(const Point3& p) : status(VALID) { reset(p); } static TriangulationResult Degenerate() { return TriangulationResult(DEGENERATE); } - static TriangulationResult Outlier() { - return TriangulationResult(OUTLIER); - } + static TriangulationResult Outlier() { return TriangulationResult(OUTLIER); } static TriangulationResult FarPoint() { return TriangulationResult(FAR_POINT); } static TriangulationResult BehindCamera() { return TriangulationResult(BEHIND_CAMERA); } - bool valid() const { - return status_ == VALID; - } - bool degenerate() const { - return status_ == DEGENERATE; - } - bool outlier() const { - return status_ == OUTLIER; - } - bool farPoint() const { - return status_ == FAR_POINT; - } - bool behindCamera() const { - return status_ == BEHIND_CAMERA; - } + bool valid() const { return status == VALID; } + bool degenerate() const { return status == DEGENERATE; } + bool outlier() const { return status == OUTLIER; } + bool farPoint() const { return status == FAR_POINT; } + bool behindCamera() const { return status == BEHIND_CAMERA; } // stream to output - friend std::ostream &operator<<(std::ostream &os, - const TriangulationResult& result) { + friend std::ostream& operator<<(std::ostream& os, + const TriangulationResult& result) { if (result) os << "point = " << *result << std::endl; else - os << "no point, status = " << result.status_ 
<< std::endl; + os << "no point, status = " << result.status << std::endl; return os; } -private: - + private: /// Serialization function friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int version) { - ar & BOOST_SERIALIZATION_NVP(status_); + template + void serialize(ARCHIVE& ar, const unsigned int version) { + ar& BOOST_SERIALIZATION_NVP(status); } }; @@ -644,6 +738,7 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, // Vector of Cameras - used by the Python/MATLAB wrapper using CameraSetCal3Bundler = CameraSet>; using CameraSetCal3_S2 = CameraSet>; +using CameraSetCal3DS2 = CameraSet>; using CameraSetCal3Fisheye = CameraSet>; using CameraSetCal3Unified = CameraSet>; using CameraSetSpherical = CameraSet; diff --git a/gtsam/hybrid/CMakeLists.txt b/gtsam/hybrid/CMakeLists.txt new file mode 100644 index 000000000..f1cfcd5c4 --- /dev/null +++ b/gtsam/hybrid/CMakeLists.txt @@ -0,0 +1,8 @@ +# Install headers +set(subdir hybrid) +file(GLOB hybrid_headers "*.h") +# FIXME: exclude headers +install(FILES ${hybrid_headers} DESTINATION include/gtsam/hybrid) + +# Add all tests +add_subdirectory(tests) diff --git a/gtsam/hybrid/GaussianMixture.cpp b/gtsam/hybrid/GaussianMixture.cpp new file mode 100644 index 000000000..b04800d21 --- /dev/null +++ b/gtsam/hybrid/GaussianMixture.cpp @@ -0,0 +1,129 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianMixture.cpp + * @brief A hybrid conditional in the Conditional Linear Gaussian scheme + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 12, 2022 + */ + +#include +#include +#include +#include +#include + +namespace gtsam { + +GaussianMixture::GaussianMixture( + const KeyVector &continuousFrontals, const KeyVector &continuousParents, + const DiscreteKeys &discreteParents, + const GaussianMixture::Conditionals &conditionals) + : BaseFactor(CollectKeys(continuousFrontals, continuousParents), + discreteParents), + BaseConditional(continuousFrontals.size()), + conditionals_(conditionals) {} + +/* *******************************************************************************/ +const GaussianMixture::Conditionals &GaussianMixture::conditionals() { + return conditionals_; +} + +/* *******************************************************************************/ +GaussianMixture GaussianMixture::FromConditionals( + const KeyVector &continuousFrontals, const KeyVector &continuousParents, + const DiscreteKeys &discreteParents, + const std::vector &conditionalsList) { + Conditionals dt(discreteParents, conditionalsList); + + return GaussianMixture(continuousFrontals, continuousParents, discreteParents, + dt); +} + +/* *******************************************************************************/ +GaussianMixture::Sum GaussianMixture::add( + const GaussianMixture::Sum &sum) const { + using Y = GaussianFactorGraph; + auto add = [](const Y &graph1, const Y &graph2) { + auto result = graph1; + result.push_back(graph2); + return result; + }; + const Sum tree = asGaussianFactorGraphTree(); + return sum.empty() ? 
tree : sum.apply(tree, add); +} + +/* *******************************************************************************/ +GaussianMixture::Sum GaussianMixture::asGaussianFactorGraphTree() const { + auto lambda = [](const GaussianFactor::shared_ptr &factor) { + GaussianFactorGraph result; + result.push_back(factor); + return result; + }; + return {conditionals_, lambda}; +} + +/* *******************************************************************************/ +size_t GaussianMixture::nrComponents() const { + size_t total = 0; + conditionals_.visit([&total](const GaussianFactor::shared_ptr &node) { + if (node) total += 1; + }); + return total; +} + +/* *******************************************************************************/ +GaussianConditional::shared_ptr GaussianMixture::operator()( + const DiscreteValues &discreteVals) const { + auto &ptr = conditionals_(discreteVals); + if (!ptr) return nullptr; + auto conditional = boost::dynamic_pointer_cast(ptr); + if (conditional) + return conditional; + else + throw std::logic_error( + "A GaussianMixture unexpectedly contained a non-conditional"); +} + +/* *******************************************************************************/ +bool GaussianMixture::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); + return e != nullptr && BaseFactor::equals(*e, tol); +} + +/* *******************************************************************************/ +void GaussianMixture::print(const std::string &s, + const KeyFormatter &formatter) const { + std::cout << s; + if (isContinuous()) std::cout << "Continuous "; + if (isDiscrete()) std::cout << "Discrete "; + if (isHybrid()) std::cout << "Hybrid "; + BaseConditional::print("", formatter); + std::cout << " Discrete Keys = "; + for (auto &dk : discreteKeys()) { + std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; + } + std::cout << "\n"; + conditionals_.print( + "", [&](Key k) { return formatter(k); }, + [&](const 
GaussianConditional::shared_ptr &gf) -> std::string { + RedirectCout rd; + if (!gf->empty()) + gf->print("", formatter); + else + return {"nullptr"}; + return rd.str(); + }); +} +} // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixture.h b/gtsam/hybrid/GaussianMixture.h new file mode 100644 index 000000000..fc1eb0f06 --- /dev/null +++ b/gtsam/hybrid/GaussianMixture.h @@ -0,0 +1,145 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianMixture.h + * @brief A hybrid conditional in the Conditional Linear Gaussian scheme + * @author Fan Jiang + * @author Varun Agrawal + * @date Mar 12, 2022 + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +/** + * @brief A conditional of gaussian mixtures indexed by discrete variables, as + * part of a Bayes Network. + * + * Represents the conditional density P(X | M, Z) where X is a continuous random + * variable, M is the selection of discrete variables corresponding to a subset + * of the Gaussian variables and Z is parent of this node + * + * The probability P(x|y,z,...) is proportional to + * \f$ \sum_i k_i \exp - \frac{1}{2} |R_i x - (d_i - S_i y - T_i z - ...)|^2 \f$ + * where i indexes the components and k_i is a component-wise normalization + * constant. 
+ * + */ +class GTSAM_EXPORT GaussianMixture + : public HybridFactor, + public Conditional { + public: + using This = GaussianMixture; + using shared_ptr = boost::shared_ptr; + using BaseFactor = HybridFactor; + using BaseConditional = Conditional; + + /// Alias for DecisionTree of GaussianFactorGraphs + using Sum = DecisionTree; + + /// typedef for Decision Tree of Gaussian Conditionals + using Conditionals = DecisionTree; + + private: + Conditionals conditionals_; + + /** + * @brief Convert a DecisionTree of factors into a DT of Gaussian FGs. + */ + Sum asGaussianFactorGraphTree() const; + + public: + /// @name Constructors + /// @{ + + /// Defaut constructor, mainly for serialization. + GaussianMixture() = default; + + /** + * @brief Construct a new GaussianMixture object. + * + * @param continuousFrontals the continuous frontals. + * @param continuousParents the continuous parents. + * @param discreteParents the discrete parents. Will be placed last. + * @param conditionals a decision tree of GaussianConditionals. The number of + * conditionals should be C^(number of discrete parents), where C is the + * cardinality of the DiscreteKeys in discreteParents, since the + * discreteParents will be used as the labels in the decision tree. 
+ */ + GaussianMixture(const KeyVector &continuousFrontals, + const KeyVector &continuousParents, + const DiscreteKeys &discreteParents, + const Conditionals &conditionals); + + /** + * @brief Make a Gaussian Mixture from a list of Gaussian conditionals + * + * @param continuousFrontals The continuous frontal variables + * @param continuousParents The continuous parent variables + * @param discreteParents Discrete parents variables + * @param conditionals List of conditionals + */ + static This FromConditionals( + const KeyVector &continuousFrontals, const KeyVector &continuousParents, + const DiscreteKeys &discreteParents, + const std::vector &conditionals); + + /// @} + /// @name Standard API + /// @{ + + GaussianConditional::shared_ptr operator()( + const DiscreteValues &discreteVals) const; + + /// Returns the total number of continuous components + size_t nrComponents() const; + + /// @} + /// @name Testable + /// @{ + + /// Test equality with base HybridFactor + bool equals(const HybridFactor &lf, double tol = 1e-9) const override; + + /* print utility */ + void print( + const std::string &s = "GaussianMixture\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + + /// @} + + /// Getter for the underlying Conditionals DecisionTree + const Conditionals &conditionals(); + + /** + * @brief Merge the Gaussian Factor Graphs in `this` and `sum` while + * maintaining the decision tree structure. 
+ * + * @param sum Decision Tree of Gaussian Factor Graphs + * @return Sum + */ + Sum add(const Sum &sum) const; +}; + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixtureFactor.cpp b/gtsam/hybrid/GaussianMixtureFactor.cpp new file mode 100644 index 000000000..8f832d8ea --- /dev/null +++ b/gtsam/hybrid/GaussianMixtureFactor.cpp @@ -0,0 +1,97 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianMixtureFactor.cpp + * @brief A set of Gaussian factors indexed by a set of discrete keys. + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 12, 2022 + */ + +#include +#include +#include +#include +#include + +namespace gtsam { + +/* *******************************************************************************/ +GaussianMixtureFactor::GaussianMixtureFactor(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys, + const Factors &factors) + : Base(continuousKeys, discreteKeys), factors_(factors) {} + +/* *******************************************************************************/ +bool GaussianMixtureFactor::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); + return e != nullptr && Base::equals(*e, tol); +} + +/* *******************************************************************************/ +GaussianMixtureFactor GaussianMixtureFactor::FromFactors( + const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys, + const std::vector &factors) { + Factors dt(discreteKeys, factors); + + return GaussianMixtureFactor(continuousKeys, 
discreteKeys, dt); +} + +/* *******************************************************************************/ +void GaussianMixtureFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + HybridFactor::print(s, formatter); + std::cout << "]{\n"; + factors_.print( + "", [&](Key k) { return formatter(k); }, + [&](const GaussianFactor::shared_ptr &gf) -> std::string { + RedirectCout rd; + std::cout << ":\n"; + if (gf) + gf->print("", formatter); + else + return {"nullptr"}; + return rd.str(); + }); + std::cout << "}" << std::endl; +} + +/* *******************************************************************************/ +const GaussianMixtureFactor::Factors &GaussianMixtureFactor::factors() { + return factors_; +} + +/* *******************************************************************************/ +GaussianMixtureFactor::Sum GaussianMixtureFactor::add( + const GaussianMixtureFactor::Sum &sum) const { + using Y = GaussianFactorGraph; + auto add = [](const Y &graph1, const Y &graph2) { + auto result = graph1; + result.push_back(graph2); + return result; + }; + const Sum tree = asGaussianFactorGraphTree(); + return sum.empty() ? 
tree : sum.apply(tree, add); +} + +/* *******************************************************************************/ +GaussianMixtureFactor::Sum GaussianMixtureFactor::asGaussianFactorGraphTree() + const { + auto wrap = [](const GaussianFactor::shared_ptr &factor) { + GaussianFactorGraph result; + result.push_back(factor); + return result; + }; + return {factors_, wrap}; +} +} // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixtureFactor.h b/gtsam/hybrid/GaussianMixtureFactor.h new file mode 100644 index 000000000..6c90ee6a7 --- /dev/null +++ b/gtsam/hybrid/GaussianMixtureFactor.h @@ -0,0 +1,140 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianMixtureFactor.h + * @brief A set of GaussianFactors, indexed by a set of discrete keys. + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 12, 2022 + */ + +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +class GaussianFactorGraph; + +using GaussianFactorVector = std::vector; + +/** + * @brief Implementation of a discrete conditional mixture factor. + * Implements a joint discrete-continuous factor where the discrete variable + * serves to "select" a mixture component corresponding to a GaussianFactor type + * of measurement. + * + * Represents the underlying Gaussian Mixture as a Decision Tree, where the set + * of discrete variables indexes to the continuous gaussian distribution. 
+ * + */ +class GTSAM_EXPORT GaussianMixtureFactor : public HybridFactor { + public: + using Base = HybridFactor; + using This = GaussianMixtureFactor; + using shared_ptr = boost::shared_ptr; + + using Sum = DecisionTree; + + /// typedef for Decision Tree of Gaussian Factors + using Factors = DecisionTree; + + private: + /// Decision tree of Gaussian factors indexed by discrete keys. + Factors factors_; + + /** + * @brief Helper function to return factors and functional to create a + * DecisionTree of Gaussian Factor Graphs. + * + * @return Sum (DecisionTree) + */ + Sum asGaussianFactorGraphTree() const; + + public: + /// @name Constructors + /// @{ + + /// Default constructor, mainly for serialization. + GaussianMixtureFactor() = default; + + /** + * @brief Construct a new Gaussian Mixture Factor object. + * + * @param continuousKeys A vector of keys representing continuous variables. + * @param discreteKeys A vector of keys representing discrete variables and + * their cardinalities. + * @param factors The decision tree of Gaussian Factors stored as the mixture + * density. + */ + GaussianMixtureFactor(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys, + const Factors &factors); + + /** + * @brief Construct a new GaussianMixtureFactor object using a vector of + * GaussianFactor shared pointers. + * + * @param keys Vector of keys for continuous factors. + * @param discreteKeys Vector of discrete keys. + * @param factors Vector of gaussian factor shared pointers. 
+ */ + GaussianMixtureFactor(const KeyVector &keys, const DiscreteKeys &discreteKeys, + const std::vector &factors) + : GaussianMixtureFactor(keys, discreteKeys, + Factors(discreteKeys, factors)) {} + + static This FromFactors( + const KeyVector &continuousKeys, const DiscreteKeys &discreteKeys, + const std::vector &factors); + + /// @} + /// @name Testable + /// @{ + + bool equals(const HybridFactor &lf, double tol = 1e-9) const override; + + void print( + const std::string &s = "HybridFactor\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + /// @} + + /// Getter for the underlying Gaussian Factor Decision Tree. + const Factors &factors(); + + /** + * @brief Combine the Gaussian Factor Graphs in `sum` and `this` while + * maintaining the original tree structure. + * + * @param sum Decision Tree of Gaussian Factor Graphs indexed by the + * variables. + * @return Sum + */ + Sum add(const Sum &sum) const; + + /// Add MixtureFactor to a Sum, syntactic sugar. + friend Sum &operator+=(Sum &sum, const GaussianMixtureFactor &factor) { + sum = factor.add(sum); + return sum; + } +}; + +// traits +template <> +struct traits : public Testable { +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp new file mode 100644 index 000000000..1292711d8 --- /dev/null +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -0,0 +1,16 @@ +/* ---------------------------------------------------------------------------- + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * See LICENSE for the license information + * -------------------------------------------------------------------------- */ + +/** + * @file HybridBayesNet.cpp + * @brief A bayes net of Gaussian Conditionals indexed by discrete keys. 
+ * @author Fan Jiang + * @date January 2022 + */ + +#include diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h new file mode 100644 index 000000000..43eead280 --- /dev/null +++ b/gtsam/hybrid/HybridBayesNet.h @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * See LICENSE for the license information + * -------------------------------------------------------------------------- */ + +/** + * @file HybridBayesNet.h + * @brief A bayes net of Gaussian Conditionals indexed by discrete keys. + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#pragma once + +#include +#include + +namespace gtsam { + +/** + * A hybrid Bayes net is a collection of HybridConditionals, which can have + * discrete conditionals, Gaussian mixtures, or pure Gaussian conditionals. + */ +class GTSAM_EXPORT HybridBayesNet : public BayesNet { + public: + using Base = BayesNet; + using This = HybridBayesNet; + using ConditionalType = HybridConditional; + using shared_ptr = boost::shared_ptr; + using sharedConditional = boost::shared_ptr; + + /** Construct empty bayes net */ + HybridBayesNet() = default; +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp new file mode 100644 index 000000000..d65270f91 --- /dev/null +++ b/gtsam/hybrid/HybridBayesTree.cpp @@ -0,0 +1,38 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridBayesTree.cpp + * @brief Hybrid Bayes Tree, the result of eliminating a + * HybridJunctionTree + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include +#include +#include +#include +#include + +namespace gtsam { + +// Instantiate base class +template class BayesTreeCliqueBase; +template class BayesTree; + +/* ************************************************************************* */ +bool HybridBayesTree::equals(const This& other, double tol) const { + return Base::equals(other, tol); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridBayesTree.h b/gtsam/hybrid/HybridBayesTree.h new file mode 100644 index 000000000..0b89ca8c4 --- /dev/null +++ b/gtsam/hybrid/HybridBayesTree.h @@ -0,0 +1,117 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridBayesTree.h + * @brief Hybrid Bayes Tree, the result of eliminating a + * HybridJunctionTree + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace gtsam { + +// Forward declarations +class HybridConditional; +class VectorValues; + +/* ************************************************************************* */ +/** A clique in a HybridBayesTree + * which is a HybridConditional internally. 
+ */ +class GTSAM_EXPORT HybridBayesTreeClique + : public BayesTreeCliqueBase { + public: + typedef HybridBayesTreeClique This; + typedef BayesTreeCliqueBase + Base; + typedef boost::shared_ptr shared_ptr; + typedef boost::weak_ptr weak_ptr; + HybridBayesTreeClique() {} + virtual ~HybridBayesTreeClique() {} + HybridBayesTreeClique(const boost::shared_ptr& conditional) + : Base(conditional) {} +}; + +/* ************************************************************************* */ +/** A Bayes tree representing a Hybrid density */ +class GTSAM_EXPORT HybridBayesTree : public BayesTree { + private: + typedef BayesTree Base; + + public: + typedef HybridBayesTree This; + typedef boost::shared_ptr shared_ptr; + + /// @name Standard interface + /// @{ + /** Default constructor, creates an empty Bayes tree */ + HybridBayesTree() = default; + + /** Check equality */ + bool equals(const This& other, double tol = 1e-9) const; + + /// @} +}; + +/** + * @brief Class for Hybrid Bayes tree orphan subtrees. + * + * This does special stuff for the hybrid case + * + * @tparam CLIQUE + */ +template +class BayesTreeOrphanWrapper< + CLIQUE, typename std::enable_if< + boost::is_same::value> > + : public CLIQUE::ConditionalType { + public: + typedef CLIQUE CliqueType; + typedef typename CLIQUE::ConditionalType Base; + + boost::shared_ptr clique; + + /** + * @brief Construct a new Bayes Tree Orphan Wrapper object. + * + * @param clique Bayes tree clique. + */ + BayesTreeOrphanWrapper(const boost::shared_ptr& clique) + : clique(clique) { + // Store parent keys in our base type factor so that eliminating those + // parent keys will pull this subtree into the elimination. 
+ this->keys_.assign(clique->conditional()->beginParents(), + clique->conditional()->endParents()); + this->discreteKeys_.assign(clique->conditional()->discreteKeys().begin(), + clique->conditional()->discreteKeys().end()); + } + + /// print utility + void print( + const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override { + clique->print(s + "stored clique", formatter); + } +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp new file mode 100644 index 000000000..8e071532d --- /dev/null +++ b/gtsam/hybrid/HybridConditional.cpp @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridConditional.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include +#include +#include +#include + +namespace gtsam { + +/* ************************************************************************ */ +HybridConditional::HybridConditional(const KeyVector &continuousFrontals, + const DiscreteKeys &discreteFrontals, + const KeyVector &continuousParents, + const DiscreteKeys &discreteParents) + : HybridConditional( + CollectKeys( + {continuousFrontals.begin(), continuousFrontals.end()}, + KeyVector{continuousParents.begin(), continuousParents.end()}), + CollectDiscreteKeys( + {discreteFrontals.begin(), discreteFrontals.end()}, + {discreteParents.begin(), discreteParents.end()}), + continuousFrontals.size() + discreteFrontals.size()) {} + +/* ************************************************************************ */ +HybridConditional::HybridConditional( + boost::shared_ptr continuousConditional) + : 
HybridConditional(continuousConditional->keys(), {}, + continuousConditional->nrFrontals()) { + inner_ = continuousConditional; +} + +/* ************************************************************************ */ +HybridConditional::HybridConditional( + boost::shared_ptr discreteConditional) + : HybridConditional({}, discreteConditional->discreteKeys(), + discreteConditional->nrFrontals()) { + inner_ = discreteConditional; +} + +/* ************************************************************************ */ +HybridConditional::HybridConditional( + boost::shared_ptr gaussianMixture) + : BaseFactor(KeyVector(gaussianMixture->keys().begin(), + gaussianMixture->keys().begin() + + gaussianMixture->nrContinuous()), + gaussianMixture->discreteKeys()), + BaseConditional(gaussianMixture->nrFrontals()) { + inner_ = gaussianMixture; +} + +/* ************************************************************************ */ +void HybridConditional::print(const std::string &s, + const KeyFormatter &formatter) const { + std::cout << s; + + if (inner_) { + inner_->print("", formatter); + + } else { + if (isContinuous()) std::cout << "Continuous "; + if (isDiscrete()) std::cout << "Discrete "; + if (isHybrid()) std::cout << "Hybrid "; + BaseConditional::print("", formatter); + + std::cout << "P("; + size_t index = 0; + const size_t N = keys().size(); + const size_t contN = N - discreteKeys_.size(); + while (index < N) { + if (index > 0) { + if (index == nrFrontals_) + std::cout << " | "; + else + std::cout << ", "; + } + if (index < contN) { + std::cout << formatter(keys()[index]); + } else { + auto &dk = discreteKeys_[index - contN]; + std::cout << "(" << formatter(dk.first) << ", " << dk.second << ")"; + } + index++; + } + } +} + +/* ************************************************************************ */ +bool HybridConditional::equals(const HybridFactor &other, double tol) const { + const This *e = dynamic_cast(&other); + return e != nullptr && BaseFactor::equals(*e, tol); +} + +} 
// namespace gtsam diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h new file mode 100644 index 000000000..3ba5da393 --- /dev/null +++ b/gtsam/hybrid/HybridConditional.h @@ -0,0 +1,177 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridConditional.h + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +class HybridGaussianFactorGraph; + +/** + * Hybrid Conditional Density + * + * As a type-erased variant of: + * - DiscreteConditional + * - GaussianConditional + * - GaussianMixture + * + * The reason why this is important is that `Conditional` is a CRTP class. + * CRTP is static polymorphism such that all CRTP classes, while bearing the + * same name, are different classes not sharing a vtable. This prevents them + * from being contained in any container, and thus it is impossible to + * dynamically cast between them. A better option, as illustrated here, is + * treating them as an implementation detail - such that the hybrid mechanism + * does not know what is inside the HybridConditional. This prevents us from + * having diamond inheritances, and neutralized the need to change other + * components of GTSAM to make hybrid elimination work. + * + * A great reference to the type-erasure pattern is Eduaado Madrid's CppCon + * talk (https://www.youtube.com/watch?v=s082Qmd_nHs). 
+ */ +class GTSAM_EXPORT HybridConditional + : public HybridFactor, + public Conditional { + public: + // typedefs needed to play nice with gtsam + typedef HybridConditional This; ///< Typedef to this class + typedef boost::shared_ptr shared_ptr; ///< shared_ptr to this class + typedef HybridFactor BaseFactor; ///< Typedef to our factor base class + typedef Conditional + BaseConditional; ///< Typedef to our conditional base class + + protected: + // Type-erased pointer to the inner type + boost::shared_ptr inner_; + + public: + /// @name Standard Constructors + /// @{ + + /// Default constructor needed for serialization. + HybridConditional() = default; + + /** + * @brief Construct a new Hybrid Conditional object + * + * @param continuousKeys Vector of keys for continuous variables. + * @param discreteKeys Keys and cardinalities for discrete variables. + * @param nFrontals The number of frontal variables in the conditional. + */ + HybridConditional(const KeyVector& continuousKeys, + const DiscreteKeys& discreteKeys, size_t nFrontals) + : BaseFactor(continuousKeys, discreteKeys), BaseConditional(nFrontals) {} + + /** + * @brief Construct a new Hybrid Conditional object + * + * @param continuousFrontals Vector of keys for continuous variables. + * @param discreteFrontals Keys and cardinalities for discrete variables. + * @param continuousParents Vector of keys for parent continuous variables. + * @param discreteParents Keys and cardinalities for parent discrete + * variables. + */ + HybridConditional(const KeyVector& continuousFrontals, + const DiscreteKeys& discreteFrontals, + const KeyVector& continuousParents, + const DiscreteKeys& discreteParents); + + /** + * @brief Construct a new Hybrid Conditional object + * + * @param continuousConditional Conditional used to create the + * HybridConditional. 
+ */ + HybridConditional( + boost::shared_ptr continuousConditional); + + /** + * @brief Construct a new Hybrid Conditional object + * + * @param discreteConditional Conditional used to create the + * HybridConditional. + */ + HybridConditional(boost::shared_ptr discreteConditional); + + /** + * @brief Construct a new Hybrid Conditional object + * + * @param gaussianMixture Gaussian Mixture Conditional used to create the + * HybridConditional. + */ + HybridConditional( + boost::shared_ptr gaussianMixture); + + /** + * @brief Return HybridConditional as a GaussianMixture + * + * @return GaussianMixture::shared_ptr + */ + GaussianMixture::shared_ptr asMixture() { + if (!isHybrid()) throw std::invalid_argument("Not a mixture"); + return boost::static_pointer_cast(inner_); + } + + /** + * @brief Return conditional as a DiscreteConditional + * + * @return DiscreteConditional::shared_ptr + */ + DiscreteConditional::shared_ptr asDiscreteConditional() { + if (!isDiscrete()) + throw std::invalid_argument("Not a discrete conditional"); + return boost::static_pointer_cast(inner_); + } + + /// @} + /// @name Testable + /// @{ + + /// GTSAM-style print + void print( + const std::string& s = "Hybrid Conditional: ", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; + + /// GTSAM-style equals + bool equals(const HybridFactor& other, double tol = 1e-9) const override; + + /// @} + + /// Get the type-erased pointer to the inner type + boost::shared_ptr inner() { return inner_; } + +}; // DiscreteConditional + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridDiscreteFactor.cpp b/gtsam/hybrid/HybridDiscreteFactor.cpp new file mode 100644 index 000000000..0455e1e90 --- /dev/null +++ b/gtsam/hybrid/HybridDiscreteFactor.cpp @@ -0,0 +1,53 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, 
Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridDiscreteFactor.cpp + * @brief Wrapper for a discrete factor + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include + +#include + +#include "gtsam/discrete/DecisionTreeFactor.h" + +namespace gtsam { + +/* ************************************************************************ */ +// TODO(fan): THIS IS VERY VERY DIRTY! We need to get DiscreteFactor right! +HybridDiscreteFactor::HybridDiscreteFactor(DiscreteFactor::shared_ptr other) + : Base(boost::dynamic_pointer_cast(other) + ->discreteKeys()), + inner_(other) {} + +/* ************************************************************************ */ +HybridDiscreteFactor::HybridDiscreteFactor(DecisionTreeFactor &&dtf) + : Base(dtf.discreteKeys()), + inner_(boost::make_shared(std::move(dtf))) {} + +/* ************************************************************************ */ +bool HybridDiscreteFactor::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); + // TODO(Varun) How to compare inner_ when they are abstract types? 
+ return e != nullptr && Base::equals(*e, tol); +} + +/* ************************************************************************ */ +void HybridDiscreteFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + HybridFactor::print(s, formatter); + inner_->print("\n", formatter); +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridDiscreteFactor.h b/gtsam/hybrid/HybridDiscreteFactor.h new file mode 100644 index 000000000..9cbea8170 --- /dev/null +++ b/gtsam/hybrid/HybridDiscreteFactor.h @@ -0,0 +1,69 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridDiscreteFactor.h + * @date Mar 11, 2022 + * @author Fan Jiang + * @author Varun Agrawal + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * A HybridDiscreteFactor is a thin container for DiscreteFactor, which allows + * us to hide the implementation of DiscreteFactor and thus avoid diamond + * inheritance. 
+ */ +class GTSAM_EXPORT HybridDiscreteFactor : public HybridFactor { + private: + DiscreteFactor::shared_ptr inner_; + + public: + using Base = HybridFactor; + using This = HybridDiscreteFactor; + using shared_ptr = boost::shared_ptr; + + /// @name Constructors + /// @{ + + // Implicit conversion from a shared ptr of DF + HybridDiscreteFactor(DiscreteFactor::shared_ptr other); + + // Forwarding constructor from concrete DecisionTreeFactor + HybridDiscreteFactor(DecisionTreeFactor &&dtf); + + /// @} + /// @name Testable + /// @{ + virtual bool equals(const HybridFactor &lf, double tol) const override; + + void print( + const std::string &s = "HybridFactor\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + + /// @} + + /// Return pointer to the internal discrete factor + DiscreteFactor::shared_ptr inner() const { return inner_; } +}; + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridEliminationTree.cpp b/gtsam/hybrid/HybridEliminationTree.cpp new file mode 100644 index 000000000..c2df2dd60 --- /dev/null +++ b/gtsam/hybrid/HybridEliminationTree.cpp @@ -0,0 +1,42 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridEliminationTree.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include +#include + +namespace gtsam { + +// Instantiate base class +template class EliminationTree; + +/* ************************************************************************* */ +HybridEliminationTree::HybridEliminationTree( + const HybridGaussianFactorGraph& factorGraph, + const VariableIndex& structure, const Ordering& order) + : Base(factorGraph, structure, order) {} + +/* ************************************************************************* */ +HybridEliminationTree::HybridEliminationTree( + const HybridGaussianFactorGraph& factorGraph, const Ordering& order) + : Base(factorGraph, order) {} + +/* ************************************************************************* */ +bool HybridEliminationTree::equals(const This& other, double tol) const { + return Base::equals(other, tol); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridEliminationTree.h b/gtsam/hybrid/HybridEliminationTree.h new file mode 100644 index 000000000..77a84fea8 --- /dev/null +++ b/gtsam/hybrid/HybridEliminationTree.h @@ -0,0 +1,69 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridEliminationTree.h + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * Elimination Tree type for Hybrid + */ +class GTSAM_EXPORT HybridEliminationTree + : public EliminationTree { + private: + friend class ::EliminationTreeTester; + + public: + typedef EliminationTree + Base; ///< Base class + typedef HybridEliminationTree This; ///< This class + typedef boost::shared_ptr shared_ptr; ///< Shared pointer to this class + + /// @name Constructors + /// @{ + + /** + * Build the elimination tree of a factor graph using pre-computed column + * structure. + * @param factorGraph The factor graph for which to build the elimination tree + * @param structure The set of factors involving each variable. If this is + * not precomputed, you can call the Create(const FactorGraph&) + * named constructor instead. + * @return The elimination tree + */ + HybridEliminationTree(const HybridGaussianFactorGraph& factorGraph, + const VariableIndex& structure, const Ordering& order); + + /** Build the elimination tree of a factor graph. Note that this has to + * compute the column structure as a VariableIndex, so if you already have + * this precomputed, use the other constructor instead. 
+ * @param factorGraph The factor graph for which to build the elimination tree + */ + HybridEliminationTree(const HybridGaussianFactorGraph& factorGraph, + const Ordering& order); + + /// @} + + /** Test whether the tree is equal to another */ + bool equals(const This& other, double tol = 1e-9) const; +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridFactor.cpp b/gtsam/hybrid/HybridFactor.cpp new file mode 100644 index 000000000..8df2d524f --- /dev/null +++ b/gtsam/hybrid/HybridFactor.cpp @@ -0,0 +1,104 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridFactor.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include + +namespace gtsam { + +/* ************************************************************************ */ +KeyVector CollectKeys(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys) { + KeyVector allKeys; + std::copy(continuousKeys.begin(), continuousKeys.end(), + std::back_inserter(allKeys)); + std::transform(discreteKeys.begin(), discreteKeys.end(), + std::back_inserter(allKeys), + [](const DiscreteKey &k) { return k.first; }); + return allKeys; +} + +/* ************************************************************************ */ +KeyVector CollectKeys(const KeyVector &keys1, const KeyVector &keys2) { + KeyVector allKeys; + std::copy(keys1.begin(), keys1.end(), std::back_inserter(allKeys)); + std::copy(keys2.begin(), keys2.end(), std::back_inserter(allKeys)); + return allKeys; +} + +/* ************************************************************************ */ +DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1, + const DiscreteKeys &key2) { 
+ DiscreteKeys allKeys; + std::copy(key1.begin(), key1.end(), std::back_inserter(allKeys)); + std::copy(key2.begin(), key2.end(), std::back_inserter(allKeys)); + return allKeys; +} + +/* ************************************************************************ */ +HybridFactor::HybridFactor(const KeyVector &keys) + : Base(keys), + isContinuous_(true), + nrContinuous_(keys.size()), + continuousKeys_(keys) {} + +/* ************************************************************************ */ +HybridFactor::HybridFactor(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys) + : Base(CollectKeys(continuousKeys, discreteKeys)), + isDiscrete_((continuousKeys.size() == 0) && (discreteKeys.size() != 0)), + isContinuous_((continuousKeys.size() != 0) && (discreteKeys.size() == 0)), + isHybrid_((continuousKeys.size() != 0) && (discreteKeys.size() != 0)), + nrContinuous_(continuousKeys.size()), + discreteKeys_(discreteKeys), + continuousKeys_(continuousKeys) {} + +/* ************************************************************************ */ +HybridFactor::HybridFactor(const DiscreteKeys &discreteKeys) + : Base(CollectKeys({}, discreteKeys)), + isDiscrete_(true), + discreteKeys_(discreteKeys), + continuousKeys_({}) {} + +/* ************************************************************************ */ +bool HybridFactor::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); + return e != nullptr && Base::equals(*e, tol) && + isDiscrete_ == e->isDiscrete_ && isContinuous_ == e->isContinuous_ && + isHybrid_ == e->isHybrid_ && nrContinuous_ == e->nrContinuous_; +} + +/* ************************************************************************ */ +void HybridFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + std::cout << s; + if (isContinuous_) std::cout << "Continuous "; + if (isDiscrete_) std::cout << "Discrete "; + if (isHybrid_) std::cout << "Hybrid "; + for (size_t c=0; c +#include +#include +#include + 
+#include +#include +namespace gtsam { + +KeyVector CollectKeys(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys); +KeyVector CollectKeys(const KeyVector &keys1, const KeyVector &keys2); +DiscreteKeys CollectDiscreteKeys(const DiscreteKeys &key1, + const DiscreteKeys &key2); + +/** + * Base class for hybrid probabilistic factors + * + * Examples: + * - HybridGaussianFactor + * - HybridDiscreteFactor + * - GaussianMixtureFactor + * - GaussianMixture + */ +class GTSAM_EXPORT HybridFactor : public Factor { + private: + bool isDiscrete_ = false; + bool isContinuous_ = false; + bool isHybrid_ = false; + + size_t nrContinuous_ = 0; + + protected: + DiscreteKeys discreteKeys_; + + /// Record continuous keys for book-keeping + KeyVector continuousKeys_; + + public: + // typedefs needed to play nice with gtsam + typedef HybridFactor This; ///< This class + typedef boost::shared_ptr + shared_ptr; ///< shared_ptr to this class + typedef Factor Base; ///< Our base class + + /// @name Standard Constructors + /// @{ + + /** Default constructor creates empty factor */ + HybridFactor() = default; + + /** + * @brief Construct hybrid factor from continuous keys. + * + * @param keys Vector of continuous keys. + */ + explicit HybridFactor(const KeyVector &keys); + + /** + * @brief Construct hybrid factor from discrete keys. + * + * @param keys Vector of discrete keys. + */ + explicit HybridFactor(const DiscreteKeys &discreteKeys); + + /** + * @brief Construct a new Hybrid Factor object. + * + * @param continuousKeys Vector of keys for continuous variables. + * @param discreteKeys Vector of keys for discrete variables. 
+ */ + HybridFactor(const KeyVector &continuousKeys, + const DiscreteKeys &discreteKeys); + + /// Virtual destructor + virtual ~HybridFactor() = default; + + /// @} + /// @name Testable + /// @{ + + /// equals + virtual bool equals(const HybridFactor &lf, double tol = 1e-9) const; + + /// print + void print( + const std::string &s = "HybridFactor\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + + /// @} + /// @name Standard Interface + /// @{ + + /// True if this is a factor of discrete variables only. + bool isDiscrete() const { return isDiscrete_; } + + /// True if this is a factor of continuous variables only. + bool isContinuous() const { return isContinuous_; } + + /// True is this is a Discrete-Continuous factor. + bool isHybrid() const { return isHybrid_; } + + /// Return the number of continuous variables in this factor. + size_t nrContinuous() const { return nrContinuous_; } + + /// Return vector of discrete keys. + DiscreteKeys discreteKeys() const { return discreteKeys_; } + + /// @} +}; +// HybridFactor + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp new file mode 100644 index 000000000..4d7490e49 --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -0,0 +1,47 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactor.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include + +#include + +namespace gtsam { + +/* ************************************************************************* */ +HybridGaussianFactor::HybridGaussianFactor(GaussianFactor::shared_ptr other) + : Base(other->keys()), inner_(other) {} + +/* ************************************************************************* */ +HybridGaussianFactor::HybridGaussianFactor(JacobianFactor &&jf) + : Base(jf.keys()), + inner_(boost::make_shared(std::move(jf))) {} + +/* ************************************************************************* */ +bool HybridGaussianFactor::equals(const HybridFactor &other, double tol) const { + const This *e = dynamic_cast(&other); + // TODO(Varun) How to compare inner_ when they are abstract types? + return e != nullptr && Base::equals(*e, tol); +} + +/* ************************************************************************* */ +void HybridGaussianFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + HybridFactor::print(s, formatter); + inner_->print("\n", formatter); +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h new file mode 100644 index 000000000..2a92c717c --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -0,0 +1,67 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactor.h + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * A HybridGaussianFactor is a layer over GaussianFactor so that we do not have + * a diamond inheritance i.e. an extra factor type that inherits from both + * HybridFactor and GaussianFactor. + */ +class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { + private: + GaussianFactor::shared_ptr inner_; + + public: + using Base = HybridFactor; + using This = HybridGaussianFactor; + using shared_ptr = boost::shared_ptr; + + // Explicit conversion from a shared ptr of GF + explicit HybridGaussianFactor(GaussianFactor::shared_ptr other); + + // Forwarding constructor from concrete JacobianFactor + explicit HybridGaussianFactor(JacobianFactor &&jf); + + public: + /// @name Testable + /// @{ + + /// Check equality. + virtual bool equals(const HybridFactor &lf, double tol) const override; + + /// GTSAM print utility. + void print( + const std::string &s = "HybridFactor\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + + /// @} + + GaussianFactor::shared_ptr inner() const { return inner_; } +}; + +// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp new file mode 100644 index 000000000..88730cae9 --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -0,0 +1,369 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactorGraph.cpp + * @brief Hybrid factor graph that uses type erasure + * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert + * @date Mar 11, 2022 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +template class EliminateableFactorGraph; + +/* ************************************************************************ */ +static GaussianMixtureFactor::Sum &addGaussian( + GaussianMixtureFactor::Sum &sum, const GaussianFactor::shared_ptr &factor) { + using Y = GaussianFactorGraph; + // If the decision tree is not intiialized, then intialize it. 
+ if (sum.empty()) { + GaussianFactorGraph result; + result.push_back(factor); + sum = GaussianMixtureFactor::Sum(result); + + } else { + auto add = [&factor](const Y &graph) { + auto result = graph; + result.push_back(factor); + return result; + }; + sum = sum.apply(add); + } + return sum; +} + +/* ************************************************************************ */ +GaussianMixtureFactor::Sum sumFrontals( + const HybridGaussianFactorGraph &factors) { + // sum out frontals, this is the factor on the separator + gttic(sum); + + GaussianMixtureFactor::Sum sum; + std::vector deferredFactors; + + for (auto &f : factors) { + if (f->isHybrid()) { + if (auto cgmf = boost::dynamic_pointer_cast(f)) { + sum = cgmf->add(sum); + } + + if (auto gm = boost::dynamic_pointer_cast(f)) { + sum = gm->asMixture()->add(sum); + } + + } else if (f->isContinuous()) { + deferredFactors.push_back( + boost::dynamic_pointer_cast(f)->inner()); + } else { + // We need to handle the case where the object is actually an + // BayesTreeOrphanWrapper! 
+ auto orphan = boost::dynamic_pointer_cast< + BayesTreeOrphanWrapper>(f); + if (!orphan) { + auto &fr = *f; + throw std::invalid_argument( + std::string("factor is discrete in continuous elimination") + + typeid(fr).name()); + } + } + } + + for (auto &f : deferredFactors) { + sum = addGaussian(sum, f); + } + + gttoc(sum); + + return sum; +} + +/* ************************************************************************ */ +std::pair +continuousElimination(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys) { + GaussianFactorGraph gfg; + for (auto &fp : factors) { + if (auto ptr = boost::dynamic_pointer_cast(fp)) { + gfg.push_back(ptr->inner()); + } else if (auto p = + boost::static_pointer_cast(fp)->inner()) { + gfg.push_back(boost::static_pointer_cast(p)); + } else { + // It is an orphan wrapped conditional + } + } + + auto result = EliminatePreferCholesky(gfg, frontalKeys); + return {boost::make_shared(result.first), + boost::make_shared(result.second)}; +} + +/* ************************************************************************ */ +std::pair +discreteElimination(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys) { + DiscreteFactorGraph dfg; + for (auto &fp : factors) { + if (auto ptr = boost::dynamic_pointer_cast(fp)) { + dfg.push_back(ptr->inner()); + } else if (auto p = + boost::static_pointer_cast(fp)->inner()) { + dfg.push_back(boost::static_pointer_cast(p)); + } else { + // It is an orphan wrapper + } + } + + auto result = EliminateDiscrete(dfg, frontalKeys); + + return {boost::make_shared(result.first), + boost::make_shared(result.second)}; +} + +/* ************************************************************************ */ +std::pair +hybridElimination(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys, + const KeySet &continuousSeparator, + const std::set &discreteSeparatorSet) { + // NOTE: since we use the special JunctionTree, + // only possiblity is continuous conditioned on 
discrete. + DiscreteKeys discreteSeparator(discreteSeparatorSet.begin(), + discreteSeparatorSet.end()); + + // sum out frontals, this is the factor on the separator + GaussianMixtureFactor::Sum sum = sumFrontals(factors); + + using EliminationPair = GaussianFactorGraph::EliminationResult; + + KeyVector keysOfEliminated; // Not the ordering + KeyVector keysOfSeparator; // TODO(frank): Is this just (keys - ordering)? + + // This is the elimination method on the leaf nodes + auto eliminate = [&](const GaussianFactorGraph &graph) + -> GaussianFactorGraph::EliminationResult { + if (graph.empty()) { + return {nullptr, nullptr}; + } + auto result = EliminatePreferCholesky(graph, frontalKeys); + if (keysOfEliminated.empty()) { + keysOfEliminated = + result.first->keys(); // Initialize the keysOfEliminated to be the + } + // keysOfEliminated of the GaussianConditional + if (keysOfSeparator.empty()) { + keysOfSeparator = result.second->keys(); + } + return result; + }; + + // Perform elimination! + DecisionTree eliminationResults(sum, eliminate); + + // Separate out decision tree into conditionals and remaining factors. + auto pair = unzip(eliminationResults); + + const GaussianMixtureFactor::Factors &separatorFactors = pair.second; + + // Create the GaussianMixture from the conditionals + auto conditional = boost::make_shared( + frontalKeys, keysOfSeparator, discreteSeparator, pair.first); + + // If there are no more continuous parents, then we should create here a + // DiscreteFactor, with the error for each discrete choice. + if (keysOfSeparator.empty()) { + VectorValues empty_values; + auto factorError = [&](const GaussianFactor::shared_ptr &factor) { + if (!factor) return 0.0; // TODO(fan): does this make sense? 
+ return exp(-factor->error(empty_values)); + }; + DecisionTree fdt(separatorFactors, factorError); + auto discreteFactor = + boost::make_shared(discreteSeparator, fdt); + + return {boost::make_shared(conditional), + boost::make_shared(discreteFactor)}; + + } else { + // Create a resulting DCGaussianMixture on the separator. + auto factor = boost::make_shared( + KeyVector(continuousSeparator.begin(), continuousSeparator.end()), + discreteSeparator, separatorFactors); + return {boost::make_shared(conditional), factor}; + } +} +/* ************************************************************************ */ +std::pair // +EliminateHybrid(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys) { + // NOTE: Because we are in the Conditional Gaussian regime there are only + // a few cases: + // 1. continuous variable, make a Gaussian Mixture if there are hybrid + // factors; + // 2. continuous variable, we make a Gaussian Factor if there are no hybrid + // factors; + // 3. discrete variable, no continuous factor is allowed + // (escapes Conditional Gaussian regime), if discrete only we do the discrete + // elimination. + + // However it is not that simple. During elimination it is possible that the + // multifrontal needs to eliminate an ordering that contains both Gaussian and + // hybrid variables, for example x1, c1. + // In this scenario, we will have a density P(x1, c1) that is a Conditional + // Linear Gaussian P(x1|c1)P(c1) (see Murphy02). + + // The issue here is that, how can we know which variable is discrete if we + // unify Values? Obviously we can tell using the factors, but is that fast? + + // In the case of multifrontal, we will need to use a constrained ordering + // so that the discrete parts will be guaranteed to be eliminated last! + // Because of all these reasons, we carefully consider how to + // implement the hybrid factors so that we do not get poor performance. + + // The first thing is how to represent the GaussianMixture. 
+ // A very possible scenario is that the incoming factors will have different + // levels of discrete keys. For example, imagine we are going to eliminate the + // fragment: $\phi(x1,c1,c2)$, $\phi(x1,c2,c3)$, which is perfectly valid. + // Now we will need to know how to retrieve the corresponding continuous + // densities for the assignment (c1,c2,c3) (OR (c2,c3,c1), note there is NO + // defined order!). We also need to consider when there is pruning. Two + // mixture factors could have different pruning patterns - one could have + // (c1=0,c2=1) pruned, and another could have (c2=0,c3=1) pruned, and this + // creates a big problem in how to identify the intersection of non-pruned + // branches. + + // Our approach is first building the collection of all discrete keys. After + // that we enumerate the space of all key combinations *lazily* so that the + // exploration branch terminates whenever an assignment yields NULL in any of + // the hybrid factors. + + // When the number of assignments is large we may encounter stack overflows. 
+ // However this is also the case with iSAM2, so no pressure :) + + // PREPROCESS: Identify the nature of the current elimination + std::unordered_map mapFromKeyToDiscreteKey; + std::set discreteSeparatorSet; + std::set discreteFrontals; + + KeySet separatorKeys; + KeySet allContinuousKeys; + KeySet continuousFrontals; + KeySet continuousSeparator; + + // This initializes separatorKeys and mapFromKeyToDiscreteKey + for (auto &&factor : factors) { + separatorKeys.insert(factor->begin(), factor->end()); + if (!factor->isContinuous()) { + for (auto &k : factor->discreteKeys()) { + mapFromKeyToDiscreteKey[k.first] = k; + } + } + } + + // remove frontals from separator + for (auto &k : frontalKeys) { + separatorKeys.erase(k); + } + + // Fill in discrete frontals and continuous frontals for the end result + for (auto &k : frontalKeys) { + if (mapFromKeyToDiscreteKey.find(k) != mapFromKeyToDiscreteKey.end()) { + discreteFrontals.insert(mapFromKeyToDiscreteKey.at(k)); + } else { + continuousFrontals.insert(k); + allContinuousKeys.insert(k); + } + } + + // Fill in discrete frontals and continuous frontals for the end result + for (auto &k : separatorKeys) { + if (mapFromKeyToDiscreteKey.find(k) != mapFromKeyToDiscreteKey.end()) { + discreteSeparatorSet.insert(mapFromKeyToDiscreteKey.at(k)); + } else { + continuousSeparator.insert(k); + allContinuousKeys.insert(k); + } + } + + // NOTE: We should really defer the product here because of pruning + + // Case 1: we are only dealing with continuous + if (mapFromKeyToDiscreteKey.empty() && !allContinuousKeys.empty()) { + return continuousElimination(factors, frontalKeys); + } + + // Case 2: we are only dealing with discrete + if (allContinuousKeys.empty()) { + return discreteElimination(factors, frontalKeys); + } + + // Case 3: We are now in the hybrid land! 
+ return hybridElimination(factors, frontalKeys, continuousSeparator, + discreteSeparatorSet); +} + +/* ************************************************************************ */ +void HybridGaussianFactorGraph::add(JacobianFactor &&factor) { + FactorGraph::add(boost::make_shared(std::move(factor))); +} + +/* ************************************************************************ */ +void HybridGaussianFactorGraph::add(JacobianFactor::shared_ptr factor) { + FactorGraph::add(boost::make_shared(factor)); +} + +/* ************************************************************************ */ +void HybridGaussianFactorGraph::add(DecisionTreeFactor &&factor) { + FactorGraph::add(boost::make_shared(std::move(factor))); +} + +/* ************************************************************************ */ +void HybridGaussianFactorGraph::add(DecisionTreeFactor::shared_ptr factor) { + FactorGraph::add(boost::make_shared(factor)); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h new file mode 100644 index 000000000..0188aa652 --- /dev/null +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -0,0 +1,118 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianFactorGraph.h + * @brief Linearized Hybrid factor graph that uses type erasure + * @author Fan Jiang + * @date Mar 11, 2022 + */ + +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +// Forward declarations +class HybridGaussianFactorGraph; +class HybridConditional; +class HybridBayesNet; +class HybridEliminationTree; +class HybridBayesTree; +class HybridJunctionTree; +class DecisionTreeFactor; + +class JacobianFactor; + +/** Main elimination function for HybridGaussianFactorGraph */ +GTSAM_EXPORT +std::pair, HybridFactor::shared_ptr> +EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys); + +/* ************************************************************************* */ +template <> +struct EliminationTraits { + typedef HybridFactor FactorType; ///< Type of factors in factor graph + typedef HybridGaussianFactorGraph + FactorGraphType; ///< Type of the factor graph (e.g. + ///< HybridGaussianFactorGraph) + typedef HybridConditional + ConditionalType; ///< Type of conditionals from elimination + typedef HybridBayesNet + BayesNetType; ///< Type of Bayes net from sequential elimination + typedef HybridEliminationTree + EliminationTreeType; ///< Type of elimination tree + typedef HybridBayesTree BayesTreeType; ///< Type of Bayes tree + typedef HybridJunctionTree + JunctionTreeType; ///< Type of Junction tree + /// The default dense elimination function + static std::pair, + boost::shared_ptr > + DefaultEliminate(const FactorGraphType& factors, const Ordering& keys) { + return EliminateHybrid(factors, keys); + } +}; + +/** + * Gaussian Hybrid Factor Graph + * ----------------------- + * This is the linearized version of a hybrid factor graph. + * Everything inside needs to be hybrid factor or hybrid conditional. 
+ */ +class GTSAM_EXPORT HybridGaussianFactorGraph + : public FactorGraph, + public EliminateableFactorGraph { + public: + using Base = FactorGraph; + using This = HybridGaussianFactorGraph; ///< this class + using BaseEliminateable = + EliminateableFactorGraph; ///< for elimination + using shared_ptr = boost::shared_ptr; ///< shared_ptr to This + + using Values = gtsam::Values; ///< backwards compatibility + using Indices = KeyVector; ///> map from keys to values + + /// @name Constructors + /// @{ + + HybridGaussianFactorGraph() = default; + + /** + * Implicit copy/downcast constructor to override explicit template container + * constructor. In BayesTree this is used for: + * `cachedSeparatorMarginal_.reset(*separatorMarginal)` + * */ + template + HybridGaussianFactorGraph(const FactorGraph& graph) + : Base(graph) {} + + /// @} + + using FactorGraph::add; + + /// Add a Jacobian factor to the factor graph. + void add(JacobianFactor&& factor); + + /// Add a Jacobian factor as a shared ptr. + void add(boost::shared_ptr factor); + + /// Add a DecisionTreeFactor to the factor graph. + void add(DecisionTreeFactor&& factor); + + /// Add a DecisionTreeFactor as a shared ptr. + void add(boost::shared_ptr factor); +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianISAM.cpp b/gtsam/hybrid/HybridGaussianISAM.cpp new file mode 100644 index 000000000..7783a88dd --- /dev/null +++ b/gtsam/hybrid/HybridGaussianISAM.cpp @@ -0,0 +1,101 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianISAM.h + * @date March 31, 2022 + * @author Fan Jiang + * @author Frank Dellaert + * @author Richard Roberts + */ + +#include +#include +#include +#include +#include + +#include + +namespace gtsam { + +// Instantiate base class +// template class ISAM; + +/* ************************************************************************* */ +HybridGaussianISAM::HybridGaussianISAM() {} + +/* ************************************************************************* */ +HybridGaussianISAM::HybridGaussianISAM(const HybridBayesTree& bayesTree) + : Base(bayesTree) {} + +/* ************************************************************************* */ +void HybridGaussianISAM::updateInternal( + const HybridGaussianFactorGraph& newFactors, + HybridBayesTree::Cliques* orphans, + const HybridBayesTree::Eliminate& function) { + // Remove the contaminated part of the Bayes tree + BayesNetType bn; + const KeySet newFactorKeys = newFactors.keys(); + if (!this->empty()) { + KeyVector keyVector(newFactorKeys.begin(), newFactorKeys.end()); + this->removeTop(keyVector, &bn, orphans); + } + + // Add the removed top and the new factors + FactorGraphType factors; + factors += bn; + factors += newFactors; + + // Add the orphaned subtrees + for (const sharedClique& orphan : *orphans) + factors += boost::make_shared >(orphan); + + KeySet allDiscrete; + for (auto& factor : factors) { + for (auto& k : factor->discreteKeys()) { + allDiscrete.insert(k.first); + } + } + KeyVector newKeysDiscreteLast; + for (auto& k : newFactorKeys) { + if (!allDiscrete.exists(k)) { + newKeysDiscreteLast.push_back(k); + } + } + std::copy(allDiscrete.begin(), allDiscrete.end(), + std::back_inserter(newKeysDiscreteLast)); + + // KeyVector new + + // Get an ordering where the new keys are eliminated last + const VariableIndex 
index(factors); + const Ordering ordering = Ordering::ColamdConstrainedLast( + index, KeyVector(newKeysDiscreteLast.begin(), newKeysDiscreteLast.end()), + true); + + // eliminate all factors (top, added, orphans) into a new Bayes tree + auto bayesTree = factors.eliminateMultifrontal(ordering, function, index); + + // Re-add into Bayes tree data structures + this->roots_.insert(this->roots_.end(), bayesTree->roots().begin(), + bayesTree->roots().end()); + this->nodes_.insert(bayesTree->nodes().begin(), bayesTree->nodes().end()); +} + +/* ************************************************************************* */ +void HybridGaussianISAM::update(const HybridGaussianFactorGraph& newFactors, + const HybridBayesTree::Eliminate& function) { + Cliques orphans; + this->updateInternal(newFactors, &orphans, function); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianISAM.h b/gtsam/hybrid/HybridGaussianISAM.h new file mode 100644 index 000000000..d5b6271da --- /dev/null +++ b/gtsam/hybrid/HybridGaussianISAM.h @@ -0,0 +1,70 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridGaussianISAM.h + * @date March 31, 2022 + * @author Fan Jiang + * @author Frank Dellaert + * @author Richard Roberts + */ + +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +class GTSAM_EXPORT HybridGaussianISAM : public ISAM { + public: + typedef ISAM Base; + typedef HybridGaussianISAM This; + typedef boost::shared_ptr shared_ptr; + + /// @name Standard Constructors + /// @{ + + /** Create an empty Bayes Tree */ + HybridGaussianISAM(); + + /** Copy constructor */ + HybridGaussianISAM(const HybridBayesTree& bayesTree); + + /// @} + + private: + /// Internal method that performs the ISAM update. + void updateInternal( + const HybridGaussianFactorGraph& newFactors, + HybridBayesTree::Cliques* orphans, + const HybridBayesTree::Eliminate& function = + HybridBayesTree::EliminationTraitsType::DefaultEliminate); + + public: + /** + * @brief Perform update step with new factors. + * + * @param newFactors Factor graph of new factors to add and eliminate. + * @param function Elimination function. + */ + void update(const HybridGaussianFactorGraph& newFactors, + const HybridBayesTree::Eliminate& function = + HybridBayesTree::EliminationTraitsType::DefaultEliminate); +}; + +/// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridJunctionTree.cpp b/gtsam/hybrid/HybridJunctionTree.cpp new file mode 100644 index 000000000..7725742cf --- /dev/null +++ b/gtsam/hybrid/HybridJunctionTree.cpp @@ -0,0 +1,173 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridJunctionTree.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include +#include +#include +#include +#include + +#include + +namespace gtsam { + +// Instantiate base classes +template class EliminatableClusterTree; +template class JunctionTree; + +struct HybridConstructorTraversalData { + typedef + typename JunctionTree::Node + Node; + typedef + typename JunctionTree::sharedNode sharedNode; + + HybridConstructorTraversalData* const parentData; + sharedNode junctionTreeNode; + FastVector childSymbolicConditionals; + FastVector childSymbolicFactors; + KeySet discreteKeys; + + // Small inner class to store symbolic factors + class SymbolicFactors : public FactorGraph {}; + + HybridConstructorTraversalData(HybridConstructorTraversalData* _parentData) + : parentData(_parentData) {} + + // Pre-order visitor function + static HybridConstructorTraversalData ConstructorTraversalVisitorPre( + const boost::shared_ptr& node, + HybridConstructorTraversalData& parentData) { + // On the pre-order pass, before children have been visited, we just set up + // a traversal data structure with its own JT node, and create a child + // pointer in its parent. 
+ HybridConstructorTraversalData data = + HybridConstructorTraversalData(&parentData); + data.junctionTreeNode = boost::make_shared(node->key, node->factors); + parentData.junctionTreeNode->addChild(data.junctionTreeNode); + + for (HybridFactor::shared_ptr& f : node->factors) { + for (auto& k : f->discreteKeys()) { + data.discreteKeys.insert(k.first); + } + } + + return data; + } + + // Post-order visitor function + static void ConstructorTraversalVisitorPostAlg2( + const boost::shared_ptr& ETreeNode, + const HybridConstructorTraversalData& data) { + // In this post-order visitor, we combine the symbolic elimination results + // from the elimination tree children and symbolically eliminate the current + // elimination tree node. We then check whether each of our elimination + // tree child nodes should be merged with us. The check for this is that + // our number of symbolic elimination parents is exactly 1 less than + // our child's symbolic elimination parents - this condition indicates that + // eliminating the current node did not introduce any parents beyond those + // already in the child-> + + // Do symbolic elimination for this node + SymbolicFactors symbolicFactors; + symbolicFactors.reserve(ETreeNode->factors.size() + + data.childSymbolicFactors.size()); + // Add ETree node factors + symbolicFactors += ETreeNode->factors; + // Add symbolic factors passed up from children + symbolicFactors += data.childSymbolicFactors; + + Ordering keyAsOrdering; + keyAsOrdering.push_back(ETreeNode->key); + SymbolicConditional::shared_ptr conditional; + SymbolicFactor::shared_ptr separatorFactor; + boost::tie(conditional, separatorFactor) = + internal::EliminateSymbolic(symbolicFactors, keyAsOrdering); + + // Store symbolic elimination results in the parent + data.parentData->childSymbolicConditionals.push_back(conditional); + data.parentData->childSymbolicFactors.push_back(separatorFactor); + data.parentData->discreteKeys.merge(data.discreteKeys); + + sharedNode node = 
data.junctionTreeNode; + const FastVector& childConditionals = + data.childSymbolicConditionals; + node->problemSize_ = (int)(conditional->size() * symbolicFactors.size()); + + // Merge our children if they are in our clique - if our conditional has + // exactly one fewer parent than our child's conditional. + const size_t nrParents = conditional->nrParents(); + const size_t nrChildren = node->nrChildren(); + assert(childConditionals.size() == nrChildren); + + // decide which children to merge, as index into children + std::vector nrChildrenFrontals = node->nrFrontalsOfChildren(); + std::vector merge(nrChildren, false); + size_t nrFrontals = 1; + for (size_t i = 0; i < nrChildren; i++) { + // Check if we should merge the i^th child + if (nrParents + nrFrontals == childConditionals[i]->nrParents()) { + const bool myType = + data.discreteKeys.exists(conditional->frontals()[0]); + const bool theirType = + data.discreteKeys.exists(childConditionals[i]->frontals()[0]); + + if (myType == theirType) { + // Increment number of frontal variables + nrFrontals += nrChildrenFrontals[i]; + merge[i] = true; + } + } + } + + // now really merge + node->mergeChildren(merge); + } +}; + +/* ************************************************************************* */ +HybridJunctionTree::HybridJunctionTree( + const HybridEliminationTree& eliminationTree) { + gttic(JunctionTree_FromEliminationTree); + // Here we rely on the BayesNet having been produced by this elimination tree, + // such that the conditionals are arranged in DFS post-order. We traverse the + // elimination tree, and inspect the symbolic conditional corresponding to + // each node. The elimination tree node is added to the same clique with its + // parent if it has exactly one more Bayes net conditional parent than + // does its elimination tree parent. + + // Traverse the elimination tree, doing symbolic elimination and merging nodes + // as we go. Gather the created junction tree roots in a dummy Node. 
+ typedef HybridConstructorTraversalData Data; + Data rootData(0); + rootData.junctionTreeNode = + boost::make_shared(); // Make a dummy node to gather + // the junction tree roots + treeTraversal::DepthFirstForest(eliminationTree, rootData, + Data::ConstructorTraversalVisitorPre, + Data::ConstructorTraversalVisitorPostAlg2); + + // Assign roots from the dummy node + this->addChildrenAsRoots(rootData.junctionTreeNode); + + // Transfer remaining factors from elimination tree + Base::remainingFactors_ = eliminationTree.remainingFactors(); +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridJunctionTree.h b/gtsam/hybrid/HybridJunctionTree.h new file mode 100644 index 000000000..cad1e15a1 --- /dev/null +++ b/gtsam/hybrid/HybridJunctionTree.h @@ -0,0 +1,71 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridJunctionTree.h + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +// Forward declarations +class HybridEliminationTree; + +/** + * An EliminatableClusterTree, i.e., a set of variable clusters with factors, + * arranged in a tree, with the additional property that it represents the + * clique tree associated with a Bayes net. + * + * In GTSAM a junction tree is an intermediate data structure in multifrontal + * variable elimination. Each node is a cluster of factors, along with a + * clique of variables that are eliminated all at once. In detail, every node k + * represents a clique (maximal fully connected subset) of an associated chordal + * graph, such as a chordal Bayes net resulting from elimination. 
+ * + * The difference with the BayesTree is that a JunctionTree stores factors, + * whereas a BayesTree stores conditionals, that are the product of eliminating + * the factors in the corresponding JunctionTree cliques. + * + * The tree structure and elimination method are exactly analogous to the + * EliminationTree, except that in the JunctionTree, at each node multiple + * variables are eliminated at a time. + * + * \addtogroup Multifrontal + * \nosubgrouping + */ +class GTSAM_EXPORT HybridJunctionTree + : public JunctionTree { + public: + typedef JunctionTree + Base; ///< Base class + typedef HybridJunctionTree This; ///< This class + typedef boost::shared_ptr shared_ptr; ///< Shared pointer to this class + + /** + * Build the elimination tree of a factor graph using precomputed column + * structure. + * @param factorGraph The factor graph for which to build the elimination tree + * @param structure The set of factors involving each variable. If this is + * not precomputed, you can call the Create(const FactorGraph&) + * named constructor instead. 
+ * @return The elimination tree + */ + HybridJunctionTree(const HybridEliminationTree& eliminationTree); +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i new file mode 100644 index 000000000..bbe1e2400 --- /dev/null +++ b/gtsam/hybrid/hybrid.i @@ -0,0 +1,145 @@ +//************************************************************************* +// hybrid +//************************************************************************* + +namespace gtsam { + +#include +virtual class HybridFactor { + void print(string s = "HybridFactor\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const gtsam::HybridFactor& other, double tol = 1e-9) const; + bool empty() const; + size_t size() const; + gtsam::KeyVector keys() const; +}; + +#include +virtual class HybridConditional { + void print(string s = "Hybrid Conditional\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const gtsam::HybridConditional& other, double tol = 1e-9) const; + size_t nrFrontals() const; + size_t nrParents() const; + Factor* inner(); +}; + +#include +class GaussianMixtureFactor : gtsam::HybridFactor { + static GaussianMixtureFactor FromFactors( + const gtsam::KeyVector& continuousKeys, + const gtsam::DiscreteKeys& discreteKeys, + const std::vector& factorsList); + + void print(string s = "GaussianMixtureFactor\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; +}; + +#include +class GaussianMixture : gtsam::HybridFactor { + static GaussianMixture FromConditionals( + const gtsam::KeyVector& continuousFrontals, + const gtsam::KeyVector& continuousParents, + const gtsam::DiscreteKeys& discreteParents, + const std::vector& + conditionalsList); + + void print(string s = "GaussianMixture\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; +}; + +#include +class HybridBayesTreeClique { + HybridBayesTreeClique(); + 
HybridBayesTreeClique(const gtsam::HybridConditional* conditional); + const gtsam::HybridConditional* conditional() const; + bool isRoot() const; + // double evaluate(const gtsam::HybridValues& values) const; +}; + +#include +class HybridBayesTree { + HybridBayesTree(); + void print(string s = "HybridBayesTree\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const gtsam::HybridBayesTree& other, double tol = 1e-9) const; + + size_t size() const; + bool empty() const; + const HybridBayesTreeClique* operator[](size_t j) const; + + string dot(const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; +}; + +class HybridBayesNet { + HybridBayesNet(); + void add(const gtsam::HybridConditional& s); + bool empty() const; + size_t size() const; + gtsam::KeySet keys() const; + const gtsam::HybridConditional* at(size_t i) const; + void print(string s = "HybridBayesNet\n", + const gtsam::KeyFormatter& keyFormatter = + gtsam::DefaultKeyFormatter) const; + bool equals(const gtsam::HybridBayesNet& other, double tol = 1e-9) const; + + string dot( + const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter, + const gtsam::DotWriter& writer = gtsam::DotWriter()) const; + void saveGraph( + string s, + const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter, + const gtsam::DotWriter& writer = gtsam::DotWriter()) const; +}; + +#include +class HybridGaussianFactorGraph { + HybridGaussianFactorGraph(); + HybridGaussianFactorGraph(const gtsam::HybridBayesNet& bayesNet); + + // Building the graph + void push_back(const gtsam::HybridFactor* factor); + void push_back(const gtsam::HybridConditional* conditional); + void push_back(const gtsam::HybridGaussianFactorGraph& graph); + void push_back(const gtsam::HybridBayesNet& bayesNet); + void push_back(const gtsam::HybridBayesTree& bayesTree); + void push_back(const gtsam::GaussianMixtureFactor* gmm); + + void add(gtsam::DecisionTreeFactor* factor); + 
void add(gtsam::JacobianFactor* factor); + + bool empty() const; + size_t size() const; + gtsam::KeySet keys() const; + const gtsam::HybridFactor* at(size_t i) const; + + void print(string s = "") const; + bool equals(const gtsam::HybridGaussianFactorGraph& fg, double tol = 1e-9) const; + + gtsam::HybridBayesNet* eliminateSequential(); + gtsam::HybridBayesNet* eliminateSequential( + gtsam::Ordering::OrderingType type); + gtsam::HybridBayesNet* eliminateSequential(const gtsam::Ordering& ordering); + pair + eliminatePartialSequential(const gtsam::Ordering& ordering); + + gtsam::HybridBayesTree* eliminateMultifrontal(); + gtsam::HybridBayesTree* eliminateMultifrontal( + gtsam::Ordering::OrderingType type); + gtsam::HybridBayesTree* eliminateMultifrontal( + const gtsam::Ordering& ordering); + pair + eliminatePartialMultifrontal(const gtsam::Ordering& ordering); + + string dot( + const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter, + const gtsam::DotWriter& writer = gtsam::DotWriter()) const; +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/tests/CMakeLists.txt b/gtsam/hybrid/tests/CMakeLists.txt new file mode 100644 index 000000000..06ad2c505 --- /dev/null +++ b/gtsam/hybrid/tests/CMakeLists.txt @@ -0,0 +1 @@ +gtsamAddTestsGlob(hybrid "test*.cpp" "" "gtsam") diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h new file mode 100644 index 000000000..c081b8e87 --- /dev/null +++ b/gtsam/hybrid/tests/Switching.h @@ -0,0 +1,86 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * @file Switching.h + * @date Mar 11, 2022 + * @author Varun Agrawal + * @author Fan Jiang + */ + +#include +#include +#include +#include +#include +#include + +#pragma once + +using gtsam::symbol_shorthand::C; +using gtsam::symbol_shorthand::X; + +namespace gtsam { +inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( + size_t n, std::function keyFunc = X, + std::function dKeyFunc = C) { + HybridGaussianFactorGraph hfg; + + hfg.add(JacobianFactor(keyFunc(1), I_3x3, Z_3x1)); + + // keyFunc(1) to keyFunc(n+1) + for (size_t t = 1; t < n; t++) { + hfg.add(GaussianMixtureFactor::FromFactors( + {keyFunc(t), keyFunc(t + 1)}, {{dKeyFunc(t), 2}}, + {boost::make_shared(keyFunc(t), I_3x3, keyFunc(t + 1), + I_3x3, Z_3x1), + boost::make_shared(keyFunc(t), I_3x3, keyFunc(t + 1), + I_3x3, Vector3::Ones())})); + + if (t > 1) { + hfg.add(DecisionTreeFactor({{dKeyFunc(t - 1), 2}, {dKeyFunc(t), 2}}, + "0 1 1 3")); + } + } + + return boost::make_shared(std::move(hfg)); +} + +inline std::pair> makeBinaryOrdering( + std::vector &input) { + KeyVector new_order; + std::vector levels(input.size()); + std::function::iterator, std::vector::iterator, + int)> + bsg = [&bsg, &new_order, &levels, &input]( + std::vector::iterator begin, + std::vector::iterator end, int lvl) { + if (std::distance(begin, end) > 1) { + std::vector::iterator pivot = + begin + std::distance(begin, end) / 2; + + new_order.push_back(*pivot); + levels[std::distance(input.begin(), pivot)] = lvl; + bsg(begin, pivot, lvl + 1); + bsg(pivot + 1, end, lvl + 1); + } else if (std::distance(begin, end) == 1) { + new_order.push_back(*begin); + levels[std::distance(input.begin(), begin)] = lvl; + } + }; + + bsg(input.begin(), input.end(), 0); + std::reverse(new_order.begin(), new_order.end()); + // std::reverse(levels.begin(), levels.end()); + 
return {new_order, levels}; +} + +} // namespace gtsam diff --git a/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp b/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp new file mode 100644 index 000000000..552bb18f5 --- /dev/null +++ b/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp @@ -0,0 +1,593 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * @file testHybridGaussianFactorGraph.cpp + * @date Mar 11, 2022 + * @author Fan Jiang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "Switching.h" + +using namespace boost::assign; + +using namespace std; +using namespace gtsam; + +using gtsam::symbol_shorthand::C; +using gtsam::symbol_shorthand::D; +using gtsam::symbol_shorthand::X; +using gtsam::symbol_shorthand::Y; + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, creation) { + HybridConditional test; + + HybridGaussianFactorGraph hfg; + + hfg.add(HybridGaussianFactor(JacobianFactor(0, I_3x3, Z_3x1))); + + GaussianMixture clgc( + {X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{C(0), 2}), + GaussianMixture::Conditionals( + C(0), + boost::make_shared(X(0), Z_3x1, I_3x3, X(1), + I_3x3), + boost::make_shared(X(0), Vector3::Ones(), I_3x3, + X(1), I_3x3))); + GTSAM_PRINT(clgc); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, 
eliminate) { + HybridGaussianFactorGraph hfg; + + hfg.add(HybridGaussianFactor(JacobianFactor(0, I_3x3, Z_3x1))); + + auto result = hfg.eliminatePartialSequential(KeyVector{0}); + + EXPECT_LONGS_EQUAL(result.first->size(), 1); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateMultifrontal) { + HybridGaussianFactorGraph hfg; + + DiscreteKey c(C(1), 2); + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8}))); + + Ordering ordering; + ordering.push_back(X(0)); + auto result = hfg.eliminatePartialMultifrontal(ordering); + + EXPECT_LONGS_EQUAL(result.first->size(), 1); + EXPECT_LONGS_EQUAL(result.second->size(), 1); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { + HybridGaussianFactorGraph hfg; + + DiscreteKey c1(C(1), 2); + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + DecisionTree dt( + C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + boost::make_shared(X(1), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + + auto result = + hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {C(1)})); + + auto dc = result->at(2)->asDiscreteConditional(); + DiscreteValues dv; + dv[C(1)] = 0; + EXPECT_DOUBLES_EQUAL(0.6225, dc->operator()(dv), 1e-3); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { + HybridGaussianFactorGraph hfg; + + DiscreteKey c1(C(1), 2); + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + DecisionTree dt( + C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + boost::make_shared(X(1), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + 
// hfg.add(GaussianMixtureFactor({X(0)}, {c1}, dt)); + hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c1, {2, 8}))); + hfg.add(HybridDiscreteFactor( + DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4"))); + // hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(2), 2}, {C(3), 2}}, "1 + // 2 3 4"))); hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(3), 2}, + // {C(1), 2}}, "1 2 2 1"))); + + auto result = hfg.eliminateSequential( + Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)})); + + GTSAM_PRINT(*result); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { + HybridGaussianFactorGraph hfg; + + DiscreteKey c1(C(1), 2); + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + // DecisionTree dt( + // C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + // boost::make_shared(X(1), I_3x3, Vector3::Ones())); + + // hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + hfg.add(GaussianMixtureFactor::FromFactors( + {X(1)}, {{C(1), 2}}, + {boost::make_shared(X(1), I_3x3, Z_3x1), + boost::make_shared(X(1), I_3x3, Vector3::Ones())})); + + // hfg.add(GaussianMixtureFactor({X(0)}, {c1}, dt)); + hfg.add(DecisionTreeFactor(c1, {2, 8})); + hfg.add(DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4")); + // hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(2), 2}, {C(3), 2}}, "1 + // 2 3 4"))); hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(3), 2}, + // {C(1), 2}}, "1 2 2 1"))); + + auto result = hfg.eliminateMultifrontal( + Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)})); + + GTSAM_PRINT(*result); + GTSAM_PRINT(*result->marginalFactor(C(2))); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { + HybridGaussianFactorGraph hfg; + + DiscreteKey c(C(1), 2); + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + 
hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + DecisionTree dt( + C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + boost::make_shared(X(1), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(1)}, {c}, dt)); + hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8}))); + // hfg.add(HybridDiscreteFactor(DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 + // 2 3 4"))); + + auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {C(1)}); + + HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full); + + GTSAM_PRINT(*hbt); + /* + Explanation: the Junction tree will need to reeliminate to get to the marginal + on X(1), which is not possible because it involves eliminating discrete before + continuous. The solution to this, however, is in Murphy02. TLDR is that this + is 1. expensive and 2. inexact. neverless it is doable. And I believe that we + should do this. + */ +} + +/* ************************************************************************* */ +/* + * This test is about how to assemble the Bayes Tree roots after we do partial + * elimination + */ +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { + HybridGaussianFactorGraph hfg; + + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1)); + + { + // DecisionTree dt( + // C(0), boost::make_shared(X(0), I_3x3, Z_3x1), + // boost::make_shared(X(0), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor::FromFactors( + {X(0)}, {{C(0), 2}}, + {boost::make_shared(X(0), I_3x3, Z_3x1), + boost::make_shared(X(0), I_3x3, Vector3::Ones())})); + + DecisionTree dt1( + C(1), boost::make_shared(X(2), I_3x3, Z_3x1), + boost::make_shared(X(2), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(2)}, {{C(1), 2}}, dt1)); + } + + // hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8}))); + hfg.add(HybridDiscreteFactor( + DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4"))); + + 
hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); + + { + DecisionTree dt( + C(3), boost::make_shared(X(3), I_3x3, Z_3x1), + boost::make_shared(X(3), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(3)}, {{C(3), 2}}, dt)); + + DecisionTree dt1( + C(2), boost::make_shared(X(5), I_3x3, Z_3x1), + boost::make_shared(X(5), I_3x3, Vector3::Ones())); + + hfg.add(GaussianMixtureFactor({X(5)}, {{C(2), 2}}, dt1)); + } + + auto ordering_full = + Ordering::ColamdConstrainedLast(hfg, {C(0), C(1), C(2), C(3)}); + + GTSAM_PRINT(ordering_full); + + HybridBayesTree::shared_ptr hbt; + HybridGaussianFactorGraph::shared_ptr remaining; + std::tie(hbt, remaining) = hfg.eliminatePartialMultifrontal(ordering_full); + + GTSAM_PRINT(*hbt); + + GTSAM_PRINT(*remaining); + + hbt->dot(std::cout); + /* + Explanation: the Junction tree will need to reeliminate to get to the marginal + on X(1), which is not possible because it involves eliminating discrete before + continuous. The solution to this, however, is in Murphy02. TLDR is that this + is 1. expensive and 2. inexact. neverless it is doable. And I believe that we + should do this. 
+ */ +} + +/* ************************************************************************* */ +// TODO(fan): make a graph like Varun's paper one +TEST(HybridGaussianFactorGraph, Switching) { + auto N = 12; + auto hfg = makeSwitchingChain(N); + + // X(5) will be the center, X(1-4), X(6-9) + // X(3), X(7) + // X(2), X(8) + // X(1), X(4), X(6), X(9) + // C(5) will be the center, C(1-4), C(6-8) + // C(3), C(7) + // C(1), C(4), C(2), C(6), C(8) + // auto ordering_full = + // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), + // X(5), + // C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)}); + KeyVector ordering; + + { + std::vector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + std::vector ordX; + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); + + KeyVector ndX; + std::vector lvls; + std::tie(ndX, lvls) = makeBinaryOrdering(ordX); + std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + for (auto &l : lvls) { + l = -l; + } + std::copy(lvls.begin(), lvls.end(), + std::ostream_iterator(std::cout, ",")); + std::cout << "\n"; + } + { + std::vector naturalC(N - 1); + std::iota(naturalC.begin(), naturalC.end(), 1); + std::vector ordC; + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return C(x); }); + KeyVector ndC; + std::vector lvls; + + // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); + std::tie(ndC, lvls) = makeBinaryOrdering(ordC); + std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); + std::copy(lvls.begin(), lvls.end(), + std::ostream_iterator(std::cout, ",")); + } + auto ordering_full = Ordering(ordering); + + // auto ordering_full = + // Ordering(); + + // for (int i = 1; i <= 9; i++) { + // ordering_full.push_back(X(i)); + // } + + // for (int i = 1; i < 9; i++) { + // ordering_full.push_back(C(i)); + // } + + // auto ordering_full = + // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), 
X(3), X(7), + // X(5), + // C(1), C(2), C(3), C(4), C(5), C(6), C(7), C(8)}); + + // GTSAM_PRINT(*hfg); + GTSAM_PRINT(ordering_full); + + HybridBayesTree::shared_ptr hbt; + HybridGaussianFactorGraph::shared_ptr remaining; + std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); + + // GTSAM_PRINT(*hbt); + + // GTSAM_PRINT(*remaining); + + { + DotWriter dw; + dw.positionHints['c'] = 2; + dw.positionHints['x'] = 1; + std::cout << hfg->dot(DefaultKeyFormatter, dw); + std::cout << "\n"; + hbt->dot(std::cout); + } + + { + DotWriter dw; + // dw.positionHints['c'] = 2; + // dw.positionHints['x'] = 1; + std::cout << "\n"; + std::cout << hfg->eliminateSequential(ordering_full) + ->dot(DefaultKeyFormatter, dw); + } + /* + Explanation: the Junction tree will need to reeliminate to get to the marginal + on X(1), which is not possible because it involves eliminating discrete before + continuous. The solution to this, however, is in Murphy02. TLDR is that this + is 1. expensive and 2. inexact. neverless it is doable. And I believe that we + should do this. 
+ */ + hbt->marginalFactor(C(11))->print("HBT: "); +} + +/* ************************************************************************* */ +// TODO(fan): make a graph like Varun's paper one +TEST(HybridGaussianFactorGraph, SwitchingISAM) { + auto N = 11; + auto hfg = makeSwitchingChain(N); + + // X(5) will be the center, X(1-4), X(6-9) + // X(3), X(7) + // X(2), X(8) + // X(1), X(4), X(6), X(9) + // C(5) will be the center, C(1-4), C(6-8) + // C(3), C(7) + // C(1), C(4), C(2), C(6), C(8) + // auto ordering_full = + // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), + // X(5), + // C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)}); + KeyVector ordering; + + { + std::vector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + std::vector ordX; + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); + + KeyVector ndX; + std::vector lvls; + std::tie(ndX, lvls) = makeBinaryOrdering(ordX); + std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + for (auto &l : lvls) { + l = -l; + } + std::copy(lvls.begin(), lvls.end(), + std::ostream_iterator(std::cout, ",")); + std::cout << "\n"; + } + { + std::vector naturalC(N - 1); + std::iota(naturalC.begin(), naturalC.end(), 1); + std::vector ordC; + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return C(x); }); + KeyVector ndC; + std::vector lvls; + + // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); + std::tie(ndC, lvls) = makeBinaryOrdering(ordC); + std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); + std::copy(lvls.begin(), lvls.end(), + std::ostream_iterator(std::cout, ",")); + } + auto ordering_full = Ordering(ordering); + + // GTSAM_PRINT(*hfg); + GTSAM_PRINT(ordering_full); + + HybridBayesTree::shared_ptr hbt; + HybridGaussianFactorGraph::shared_ptr remaining; + std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); + + // 
GTSAM_PRINT(*hbt); + + // GTSAM_PRINT(*remaining); + + { + DotWriter dw; + dw.positionHints['c'] = 2; + dw.positionHints['x'] = 1; + std::cout << hfg->dot(DefaultKeyFormatter, dw); + std::cout << "\n"; + hbt->dot(std::cout); + } + + { + DotWriter dw; + // dw.positionHints['c'] = 2; + // dw.positionHints['x'] = 1; + std::cout << "\n"; + std::cout << hfg->eliminateSequential(ordering_full) + ->dot(DefaultKeyFormatter, dw); + } + + auto new_fg = makeSwitchingChain(12); + auto isam = HybridGaussianISAM(*hbt); + + { + HybridGaussianFactorGraph factorGraph; + factorGraph.push_back(new_fg->at(new_fg->size() - 2)); + factorGraph.push_back(new_fg->at(new_fg->size() - 1)); + isam.update(factorGraph); + std::cout << isam.dot(); + isam.marginalFactor(C(11))->print(); + } +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { + const int N = 7; + auto hfg = makeSwitchingChain(N, X); + hfg->push_back(*makeSwitchingChain(N, Y, D)); + + for (int t = 1; t <= N; t++) { + hfg->add(JacobianFactor(X(t), I_3x3, Y(t), -I_3x3, Vector3(1.0, 0.0, 0.0))); + } + + KeyVector ordering; + + KeyVector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + KeyVector ordX; + for (size_t i = 1; i <= N; i++) { + ordX.emplace_back(X(i)); + ordX.emplace_back(Y(i)); + } + + // { + // KeyVector ndX; + // std::vector lvls; + // std::tie(ndX, lvls) = makeBinaryOrdering(naturalX); + // std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + // std::copy(lvls.begin(), lvls.end(), + // std::ostream_iterator(std::cout, ",")); + // std::cout << "\n"; + + // for (size_t i = 0; i < N; i++) { + // ordX.emplace_back(X(ndX[i])); + // ordX.emplace_back(Y(ndX[i])); + // } + // } + + for (size_t i = 1; i <= N - 1; i++) { + ordX.emplace_back(C(i)); + } + for (size_t i = 1; i <= N - 1; i++) { + ordX.emplace_back(D(i)); + } + + { + DotWriter dw; + dw.positionHints['x'] = 1; + dw.positionHints['c'] = 0; + 
dw.positionHints['d'] = 3; + dw.positionHints['y'] = 2; + std::cout << hfg->dot(DefaultKeyFormatter, dw); + std::cout << "\n"; + } + + { + DotWriter dw; + dw.positionHints['y'] = 9; + // dw.positionHints['c'] = 0; + // dw.positionHints['d'] = 3; + dw.positionHints['x'] = 1; + std::cout << "\n"; + // std::cout << hfg->eliminateSequential(Ordering(ordX)) + // ->dot(DefaultKeyFormatter, dw); + hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout); + } + + Ordering ordering_partial; + for (size_t i = 1; i <= N; i++) { + ordering_partial.emplace_back(X(i)); + ordering_partial.emplace_back(Y(i)); + } + { + HybridBayesNet::shared_ptr hbn; + HybridGaussianFactorGraph::shared_ptr remaining; + std::tie(hbn, remaining) = + hfg->eliminatePartialSequential(ordering_partial); + + // remaining->print(); + { + DotWriter dw; + dw.positionHints['x'] = 1; + dw.positionHints['c'] = 0; + dw.positionHints['d'] = 3; + dw.positionHints['y'] = 2; + std::cout << remaining->dot(DefaultKeyFormatter, dw); + std::cout << "\n"; + } + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testGaussianMixture.cpp b/gtsam/hybrid/tests/testGaussianMixture.cpp new file mode 100644 index 000000000..420e22315 --- /dev/null +++ b/gtsam/hybrid/tests/testGaussianMixture.cpp @@ -0,0 +1,95 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testGaussianMixture.cpp + * @brief Unit tests for GaussianMixture class + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include + +#include + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::M; +using symbol_shorthand::X; + +/* ************************************************************************* */ +/* Check construction of GaussianMixture P(x1 | x2, m1) as well as accessing a + * specific mode i.e. P(x1 | x2, m1=1). + */ +TEST(GaussianMixture, Equals) { + // create a conditional gaussian node + Matrix S1(2, 2); + S1(0, 0) = 1; + S1(1, 0) = 2; + S1(0, 1) = 3; + S1(1, 1) = 4; + + Matrix S2(2, 2); + S2(0, 0) = 6; + S2(1, 0) = 0.2; + S2(0, 1) = 8; + S2(1, 1) = 0.4; + + Matrix R1(2, 2); + R1(0, 0) = 0.1; + R1(1, 0) = 0.3; + R1(0, 1) = 0.0; + R1(1, 1) = 0.34; + + Matrix R2(2, 2); + R2(0, 0) = 0.1; + R2(1, 0) = 0.3; + R2(0, 1) = 0.0; + R2(1, 1) = 0.34; + + SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector2(1.0, 0.34)); + + Vector2 d1(0.2, 0.5), d2(0.5, 0.2); + + auto conditional0 = boost::make_shared(X(1), d1, R1, + X(2), S1, model), + conditional1 = boost::make_shared(X(1), d2, R2, + X(2), S2, model); + + // Create decision tree + DiscreteKey m1(1, 2); + GaussianMixture::Conditionals conditionals( + {m1}, + vector{conditional0, conditional1}); + GaussianMixture mixtureFactor({X(1)}, {X(2)}, {m1}, conditionals); + + // Let's check that this worked: + DiscreteValues mode; + mode[m1.first] = 1; + auto actual = mixtureFactor(mode); + EXPECT(actual == conditional1); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return 
TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp new file mode 100644 index 000000000..36477218b --- /dev/null +++ b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp @@ -0,0 +1,159 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianMixtureFactor.cpp + * @brief Unit tests for GaussianMixtureFactor + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include +#include +#include + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::M; +using symbol_shorthand::X; + +/* ************************************************************************* */ +// Check iterators of empty mixture. +TEST(GaussianMixtureFactor, Constructor) { + GaussianMixtureFactor factor; + GaussianMixtureFactor::const_iterator const_it = factor.begin(); + CHECK(const_it == factor.end()); + GaussianMixtureFactor::iterator it = factor.begin(); + CHECK(it == factor.end()); +} + +/* ************************************************************************* */ +// "Add" two mixture factors together. 
+TEST(GaussianMixtureFactor, Sum) { + DiscreteKey m1(1, 2), m2(2, 3); + + auto A1 = Matrix::Zero(2, 1); + auto A2 = Matrix::Zero(2, 2); + auto A3 = Matrix::Zero(2, 3); + auto b = Matrix::Zero(2, 1); + Vector2 sigmas; + sigmas << 1, 2; + auto model = noiseModel::Diagonal::Sigmas(sigmas, true); + + auto f10 = boost::make_shared(X(1), A1, X(2), A2, b); + auto f11 = boost::make_shared(X(1), A1, X(2), A2, b); + auto f20 = boost::make_shared(X(1), A1, X(3), A3, b); + auto f21 = boost::make_shared(X(1), A1, X(3), A3, b); + auto f22 = boost::make_shared(X(1), A1, X(3), A3, b); + std::vector factorsA{f10, f11}; + std::vector factorsB{f20, f21, f22}; + + // TODO(Frank): why specify keys at all? And: keys in factor should be *all* + // keys, deviating from Kevin's scheme. Should we index DT on DiscreteKey? + // Design review! + GaussianMixtureFactor mixtureFactorA({X(1), X(2)}, {m1}, factorsA); + GaussianMixtureFactor mixtureFactorB({X(1), X(3)}, {m2}, factorsB); + + // Check that number of keys is 3 + EXPECT_LONGS_EQUAL(3, mixtureFactorA.keys().size()); + + // Check that number of discrete keys is 1 // TODO(Frank): should not exist? 
+ EXPECT_LONGS_EQUAL(1, mixtureFactorA.discreteKeys().size()); + + // Create sum of two mixture factors: it will be a decision tree now on both + // discrete variables m1 and m2: + GaussianMixtureFactor::Sum sum; + sum += mixtureFactorA; + sum += mixtureFactorB; + + // Let's check that this worked: + Assignment mode; + mode[m1.first] = 1; + mode[m2.first] = 2; + auto actual = sum(mode); + EXPECT(actual.at(0) == f11); + EXPECT(actual.at(1) == f22); +} + +TEST(GaussianMixtureFactor, Printing) { + DiscreteKey m1(1, 2); + auto A1 = Matrix::Zero(2, 1); + auto A2 = Matrix::Zero(2, 2); + auto b = Matrix::Zero(2, 1); + auto f10 = boost::make_shared(X(1), A1, X(2), A2, b); + auto f11 = boost::make_shared(X(1), A1, X(2), A2, b); + std::vector factors{f10, f11}; + + GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); + + std::string expected = + R"(Hybrid x1 x2; 1 ]{ + Choice(1) + 0 Leaf : + A[x1] = [ + 0; + 0 +] + A[x2] = [ + 0, 0; + 0, 0 +] + b = [ 0 0 ] + No noise model + + 1 Leaf : + A[x1] = [ + 0; + 0 +] + A[x2] = [ + 0, 0; + 0, 0 +] + b = [ 0 0 ] + No noise model + +} +)"; + EXPECT(assert_print_equal(expected, mixtureFactor)); +} + +TEST_UNSAFE(GaussianMixtureFactor, GaussianMixture) { + KeyVector keys; + keys.push_back(X(0)); + keys.push_back(X(1)); + + DiscreteKeys dKeys; + dKeys.emplace_back(M(0), 2); + dKeys.emplace_back(M(1), 2); + + auto gaussians = boost::make_shared(); + GaussianMixture::Conditionals conditionals(gaussians); + GaussianMixture gm({}, keys, dKeys, conditionals); + + EXPECT_LONGS_EQUAL(2, gm.discreteKeys().size()); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ \ No newline at end of file diff --git a/gtsam/inference/BayesNet-inst.h b/gtsam/inference/BayesNet-inst.h index afde5498d..e792b5c03 100644 --- a/gtsam/inference/BayesNet-inst.h +++ 
b/gtsam/inference/BayesNet-inst.h @@ -53,8 +53,9 @@ void BayesNet::dot(std::ostream& os, auto frontals = conditional->frontals(); const Key me = frontals.front(); auto parents = conditional->parents(); - for (const Key& p : parents) - os << " var" << keyFormatter(p) << "->var" << keyFormatter(me) << "\n"; + for (const Key& p : parents) { + os << " var" << p << "->var" << me << "\n"; + } } os << "}"; diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index 5b053ebee..924a505a2 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -33,6 +33,7 @@ namespace gtsam { // Forward declarations template class FactorGraph; template class EliminatableClusterTree; + class HybridBayesTreeClique; /* ************************************************************************* */ /** clique statistics */ @@ -272,24 +273,33 @@ namespace gtsam { }; // BayesTree /* ************************************************************************* */ - template - class BayesTreeOrphanWrapper : public CLIQUE::ConditionalType - { - public: + template + class BayesTreeOrphanWrapper : public CLIQUE::ConditionalType { + public: typedef CLIQUE CliqueType; typedef typename CLIQUE::ConditionalType Base; boost::shared_ptr clique; - BayesTreeOrphanWrapper(const boost::shared_ptr& clique) : - clique(clique) - { - // Store parent keys in our base type factor so that eliminating those parent keys will pull - // this subtree into the elimination. - this->keys_.assign(clique->conditional()->beginParents(), clique->conditional()->endParents()); + /** + * @brief Construct a new Bayes Tree Orphan Wrapper object + * + * This object stores parent keys in our base type factor so that + * eliminating those parent keys will pull this subtree into the + * elimination. + * + * @param clique Orphan clique to add for further consideration in + * elimination. 
+ */ + BayesTreeOrphanWrapper(const boost::shared_ptr& clique) + : clique(clique) { + this->keys_.assign(clique->conditional()->beginParents(), + clique->conditional()->endParents()); } - void print(const std::string& s="", const KeyFormatter& formatter = DefaultKeyFormatter) const override { + void print( + const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override { clique->print(s + "stored clique", formatter); } }; diff --git a/gtsam/inference/DotWriter.cpp b/gtsam/inference/DotWriter.cpp index ad5330575..eac0c90f9 100644 --- a/gtsam/inference/DotWriter.cpp +++ b/gtsam/inference/DotWriter.cpp @@ -43,7 +43,7 @@ void DotWriter::drawVariable(Key key, const KeyFormatter& keyFormatter, const boost::optional& position, ostream* os) const { // Label the node with the label from the KeyFormatter - *os << " var" << keyFormatter(key) << "[label=\"" << keyFormatter(key) + *os << " var" << key << "[label=\"" << keyFormatter(key) << "\""; if (position) { *os << ", pos=\"" << position->x() << "," << position->y() << "!\""; @@ -65,13 +65,13 @@ void DotWriter::DrawFactor(size_t i, const boost::optional& position, static void ConnectVariables(Key key1, Key key2, const KeyFormatter& keyFormatter, ostream* os) { - *os << " var" << keyFormatter(key1) << "--" - << "var" << keyFormatter(key2) << ";\n"; + *os << " var" << key1 << "--" + << "var" << key2 << ";\n"; } static void ConnectVariableFactor(Key key, const KeyFormatter& keyFormatter, size_t i, ostream* os) { - *os << " var" << keyFormatter(key) << "--" + *os << " var" << key << "--" << "factor" << i << ";\n"; } diff --git a/gtsam/inference/EliminateableFactorGraph.h b/gtsam/inference/EliminateableFactorGraph.h index c904d2f7f..900346f7f 100644 --- a/gtsam/inference/EliminateableFactorGraph.h +++ b/gtsam/inference/EliminateableFactorGraph.h @@ -204,7 +204,7 @@ namespace gtsam { OptionalVariableIndex variableIndex = boost::none) const; /** Do multifrontal elimination of the given \c 
variables in an ordering computed by COLAMD to - * produce a Bayes net and a remaining factor graph. This computes the factorization \f$ p(X) + * produce a Bayes tree and a remaining factor graph. This computes the factorization \f$ p(X) * = p(A|B) p(B) \f$, where \f$ A = \f$ \c variables, \f$ X \f$ is all the variables in the * factor graph, and \f$ B = X\backslash A \f$. */ std::pair, boost::shared_ptr > diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h index afea63da8..101134c83 100644 --- a/gtsam/inference/FactorGraph.h +++ b/gtsam/inference/FactorGraph.h @@ -361,7 +361,7 @@ class FactorGraph { * less than the original, factors at the end will be removed. If the new * size is larger than the original, null factors will be appended. */ - void resize(size_t size) { factors_.resize(size); } + virtual void resize(size_t size) { factors_.resize(size); } /** delete factor without re-arranging indexes by inserting a nullptr pointer */ diff --git a/gtsam/inference/JunctionTree.h b/gtsam/inference/JunctionTree.h index e01f3721a..e914c325e 100644 --- a/gtsam/inference/JunctionTree.h +++ b/gtsam/inference/JunctionTree.h @@ -70,7 +70,7 @@ namespace gtsam { /// @} - private: + protected: // Private default constructor (used in static construction methods) JunctionTree() {} diff --git a/gtsam/inference/Ordering.cpp b/gtsam/inference/Ordering.cpp index 2ac2c0dde..7b7ff1403 100644 --- a/gtsam/inference/Ordering.cpp +++ b/gtsam/inference/Ordering.cpp @@ -283,6 +283,17 @@ void Ordering::print(const std::string& str, cout.flush(); } +/* ************************************************************************* */ +Ordering::This& Ordering::operator+=(KeyVector& keys) { + this->insert(this->end(), keys.begin(), keys.end()); + return *this; +} + +/* ************************************************************************* */ +bool Ordering::contains(const Key& key) const { + return std::find(this->begin(), this->end(), key) != this->end(); +} + /* 
************************************************************************* */ bool Ordering::equals(const Ordering& other, double tol) const { return (*this) == other; diff --git a/gtsam/inference/Ordering.h b/gtsam/inference/Ordering.h index a2d165792..b366b3b3a 100644 --- a/gtsam/inference/Ordering.h +++ b/gtsam/inference/Ordering.h @@ -70,7 +70,23 @@ public: boost::assign_detail::call_push_back(*this))(key); } - /// Invert (not reverse) the ordering - returns a map from key to order position + /** + * @brief Append new keys to the ordering as `ordering += keys`. + * + * @param key + * @return The ordering variable with appended keys. + */ + This& operator+=(KeyVector& keys); + + /// Check if key exists in ordering. + bool contains(const Key& key) const; + + /** + * @brief Invert (not reverse) the ordering - returns a map from key to order + * position. + * + * @return FastMap + */ FastMap invert() const; /// @name Fill-reducing Orderings @{ diff --git a/gtsam/inference/inference.i b/gtsam/inference/inference.i index e7b074ec4..fbdd70fdf 100644 --- a/gtsam/inference/inference.i +++ b/gtsam/inference/inference.i @@ -9,6 +9,7 @@ namespace gtsam { #include #include #include +#include #include @@ -106,36 +107,36 @@ class Ordering { template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering ColamdConstrainedLast( const FACTOR_GRAPH& graph, const gtsam::KeyVector& constrainLast, bool forceOrder = false); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, 
gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering ColamdConstrainedFirst( const FACTOR_GRAPH& graph, const gtsam::KeyVector& constrainFirst, bool forceOrder = false); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering Natural(const FACTOR_GRAPH& graph); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering Metis(const FACTOR_GRAPH& graph); template < FACTOR_GRAPH = {gtsam::NonlinearFactorGraph, gtsam::DiscreteFactorGraph, - gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph}> + gtsam::SymbolicFactorGraph, gtsam::GaussianFactorGraph, gtsam::HybridGaussianFactorGraph}> static gtsam::Ordering Create(gtsam::Ordering::OrderingType orderingType, const FACTOR_GRAPH& graph); diff --git a/gtsam/inference/tests/testOrdering.cpp b/gtsam/inference/tests/testOrdering.cpp index 6fdca0d89..1afa1cfde 100644 --- a/gtsam/inference/tests/testOrdering.cpp +++ b/gtsam/inference/tests/testOrdering.cpp @@ -199,6 +199,32 @@ TEST(Ordering, csr_format_3) { EXPECT(adjExpected == adjAcutal); } +/* ************************************************************************* */ +TEST(Ordering, AppendVector) { + using symbol_shorthand::X; + Ordering actual; + KeyVector keys = {X(0), X(1), X(2)}; + actual += keys; + + Ordering expected; + expected += X(0); + expected += X(1); + expected += X(2); + EXPECT(assert_equal(expected, actual)); +} + +/* ************************************************************************* */ +TEST(Ordering, 
Contains) { + using symbol_shorthand::X; + Ordering ordering; + ordering += X(0); + ordering += X(1); + ordering += X(2); + + EXPECT(ordering.contains(X(1))); + EXPECT(!ordering.contains(X(4))); +} + /* ************************************************************************* */ #ifdef GTSAM_SUPPORT_NESTED_DISSECTION TEST(Ordering, csr_format_4) { diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index 940ffd882..e4f2950ed 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -45,7 +45,7 @@ namespace gtsam { /// @name Standard Constructors /// @{ - /** Construct empty factor graph */ + /** Construct empty bayes net */ GaussianBayesNet() {} /** Construct from iterator over conditionals */ diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 6199f91a7..951577641 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -15,10 +15,10 @@ * @author Christian Potthast, Frank Dellaert */ -#include #include -#include #include +#include +#include #include #ifdef __GNUC__ diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index b2b616dab..11fe1f318 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include // for std::mt19937_64 diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 5c379beb8..5a29e5d7d 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -697,6 +697,12 @@ namespace gtsam { return robust_->loss(std::sqrt(squared_distance)); } + // NOTE: This is special because in whiten the base version will do the reweighting + // which is incorrect! 
+ double squaredMahalanobisDistance(const Vector& v) const override { + return noise_->squaredMahalanobisDistance(v); + } + // These are really robust iterated re-weighting support functions virtual void WhitenSystem(Vector& b) const; void WhitenSystem(std::vector& A, Vector& b) const override; diff --git a/gtsam/linear/SubgraphBuilder.cpp b/gtsam/linear/SubgraphBuilder.cpp index 18e19cd20..de7ae7060 100644 --- a/gtsam/linear/SubgraphBuilder.cpp +++ b/gtsam/linear/SubgraphBuilder.cpp @@ -337,7 +337,6 @@ vector SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, DSFVector dsf(n); size_t count = 0; - double sum = 0.0; for (const size_t index : sortedIndices) { const GaussianFactor &gf = *gfg[index]; const auto keys = gf.keys(); @@ -347,7 +346,6 @@ vector SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, if (dsf.find(u) != dsf.find(v)) { dsf.merge(u, v); treeIndices.push_back(index); - sum += weights[index]; if (++count == n - 1) break; } } diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index f1bc92f69..fdf156ff9 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -279,7 +279,6 @@ virtual class GaussianFactor { virtual class JacobianFactor : gtsam::GaussianFactor { //Constructors JacobianFactor(); - JacobianFactor(const gtsam::GaussianFactor& factor); JacobianFactor(Vector b_in); JacobianFactor(size_t i1, Matrix A1, Vector b, const gtsam::noiseModel::Diagonal* model); @@ -295,6 +294,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor { JacobianFactor(const gtsam::GaussianFactorGraph& graph, const gtsam::Ordering& ordering, const gtsam::VariableSlots& p_variableSlots); + JacobianFactor(const gtsam::GaussianFactor& factor); //Testable void print(string s = "", const gtsam::KeyFormatter& keyFormatter = @@ -671,6 +671,10 @@ virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters { DummyPreconditionerParameters(); }; +virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParameters { + 
BlockJacobiPreconditionerParameters(); +}; + #include virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters { PCGSolverParameters(); diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index b974b6cd5..f83ba7831 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -661,12 +661,13 @@ TEST(NoiseModel, robustNoiseDCS) TEST(NoiseModel, robustNoiseL2WithDeadZone) { double dead_zone_size = 1.0; - SharedNoiseModel robust = noiseModel::Robust::Create( + auto robust = noiseModel::Robust::Create( noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size), Unit::Create(3)); for (int i = 0; i < 5; i++) { - Vector3 error = Vector3(i, 0, 0); + Vector error = Vector3(i, 0, 0); + robust->WhitenSystem(error); DOUBLES_EQUAL(std::fmax(0, i - dead_zone_size) * i, robust->squaredMahalanobisDistance(error), 1e-8); } diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h index cc3fdaf34..eb1c0233f 100644 --- a/gtsam/nonlinear/GncOptimizer.h +++ b/gtsam/nonlinear/GncOptimizer.h @@ -207,9 +207,11 @@ class GTSAM_EXPORT GncOptimizer { std::cout << "GNC Optimizer stopped because all measurements are already known to be inliers or outliers" << std::endl; } + if (params_.verbosity >= GncParameters::Verbosity::MU) { + std::cout << "mu: " << mu << std::endl; + } if (params_.verbosity >= GncParameters::Verbosity::VALUES) { result.print("result\n"); - std::cout << "mu: " << mu << std::endl; } return result; } @@ -218,12 +220,16 @@ class GTSAM_EXPORT GncOptimizer { for (iter = 0; iter < params_.maxIterations; iter++) { // display info - if (params_.verbosity >= GncParameters::Verbosity::VALUES) { + if (params_.verbosity >= GncParameters::Verbosity::MU) { std::cout << "iter: " << iter << std::endl; - result.print("result\n"); std::cout << "mu: " << mu << std::endl; + } + if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) { std::cout << "weights: " << weights_ << 
std::endl; } + if (params_.verbosity >= GncParameters::Verbosity::VALUES) { + result.print("result\n"); + } // weights update weights_ = calculateWeights(result, mu); @@ -255,10 +261,12 @@ class GTSAM_EXPORT GncOptimizer { if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) { std::cout << "final iterations: " << iter << std::endl; std::cout << "final mu: " << mu << std::endl; - std::cout << "final weights: " << weights_ << std::endl; std::cout << "previous cost: " << prev_cost << std::endl; std::cout << "current cost: " << cost << std::endl; } + if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) { + std::cout << "final weights: " << weights_ << std::endl; + } return result; } @@ -293,6 +301,11 @@ class GTSAM_EXPORT GncOptimizer { std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k]) ) : mu_init; } } + if (mu_init >= 0 && mu_init < 1e-6){ + mu_init = 1e-6; // if mu ~ 0 (but positive), that means we have measurements with large errors, + // i.e., rk > barcSq_[k] and rk very large, hence we threshold to 1e-6 to avoid mu = 0 + } + return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1; // if mu <= 0 or mu = inf, return -1, // which leads to termination of the main gnc loop. In this case, all residuals are already below the threshold // and there is no need to robustify (TLS = least squares) @@ -340,8 +353,10 @@ class GTSAM_EXPORT GncOptimizer { bool checkCostConvergence(const double cost, const double prev_cost) const { bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7) < params_.relativeCostTol; - if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY) - std::cout << "checkCostConvergence = true " << std::endl; + if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY){ + std::cout << "checkCostConvergence = true (prev. cost = " << prev_cost + << ", curr. 
cost = " << cost << ")" << std::endl; + } return costConverged; } @@ -436,18 +451,16 @@ class GTSAM_EXPORT GncOptimizer { return weights; } case GncLossType::TLS: { // use eq (14) in GNC paper - double upperbound = (mu + 1) / mu * barcSq_.maxCoeff(); - double lowerbound = mu / (mu + 1) * barcSq_.minCoeff(); for (size_t k : unknownWeights) { if (nfg_[k]) { double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual - if (u2_k >= upperbound) { + double upperbound = (mu + 1) / mu * barcSq_[k]; + double lowerbound = mu / (mu + 1) * barcSq_[k]; + weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - mu; + if (u2_k >= upperbound || weights[k] < 0) { weights[k] = 0; - } else if (u2_k <= lowerbound) { + } else if (u2_k <= lowerbound || weights[k] > 1) { weights[k] = 1; - } else { - weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - - mu; } } } diff --git a/gtsam/nonlinear/GncParams.h b/gtsam/nonlinear/GncParams.h index 086f08acc..1f324ae38 100644 --- a/gtsam/nonlinear/GncParams.h +++ b/gtsam/nonlinear/GncParams.h @@ -48,6 +48,8 @@ class GTSAM_EXPORT GncParams { enum Verbosity { SILENT = 0, SUMMARY, + MU, + WEIGHTS, VALUES }; diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index 028545d01..3c5aa9cab 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -121,7 +121,7 @@ public: /** Optimize the bayes tree */ VectorValues optimize() const; - + protected: /** Compute the Bayes Tree as a helper function to the constructor */ diff --git a/gtsam/nonlinear/PriorFactor.h b/gtsam/nonlinear/PriorFactor.h index c745f7bd9..a490162ac 100644 --- a/gtsam/nonlinear/PriorFactor.h +++ b/gtsam/nonlinear/PriorFactor.h @@ -94,7 +94,6 @@ namespace gtsam { Vector evaluateError(const T& x, boost::optional H = boost::none) const override { if (H) (*H) = Matrix::Identity(traits::GetDimension(x),traits::GetDimension(x)); // manifold equivalent of z-x -> Local(x,z) - // TODO(ASL) Add Jacobians. 
return -traits::Local(x, prior_); } diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index 3fff71978..033e5ced2 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -226,6 +226,10 @@ class Values { void insert(size_t j, const gtsam::PinholeCamera& camera); void insert(size_t j, const gtsam::PinholeCamera& camera); void insert(size_t j, const gtsam::PinholeCamera& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); + void insert(size_t j, const gtsam::PinholePose& camera); void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); void insert(size_t j, double c); @@ -269,6 +273,10 @@ class Values { void update(size_t j, const gtsam::PinholeCamera& camera); void update(size_t j, const gtsam::PinholeCamera& camera); void update(size_t j, const gtsam::PinholeCamera& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); + void update(size_t j, const gtsam::PinholePose& camera); void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void update(size_t j, const gtsam::NavState& nav_state); void update(size_t j, Vector vector); @@ -310,6 +318,10 @@ class Values { void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); void insert_or_assign(size_t j, const gtsam::PinholeCamera& camera); + void insert_or_assign(size_t j, const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, const gtsam::PinholePose& camera); + void insert_or_assign(size_t j, const gtsam::PinholePose& camera); void insert_or_assign(size_t j, const 
gtsam::imuBias::ConstantBias& constant_bias); void insert_or_assign(size_t j, const gtsam::NavState& nav_state); void insert_or_assign(size_t j, Vector vector); @@ -351,6 +363,10 @@ class Values { gtsam::PinholeCamera, gtsam::PinholeCamera, gtsam::PinholeCamera, + gtsam::PinholePose, + gtsam::PinholePose, + gtsam::PinholePose, + gtsam::PinholePose, gtsam::imuBias::ConstantBias, gtsam::NavState, Vector, @@ -468,6 +484,9 @@ virtual class NonlinearOptimizerParams { bool isSequential() const; bool isCholmod() const; bool isIterative() const; + + // This only applies to python since matlab does not have lambda machinery. + gtsam::NonlinearOptimizerParams::IterationHook iterationHook; }; bool checkConvergence(double relativeErrorTreshold, diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index da352f2e9..9874760c4 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -79,9 +79,8 @@ class BearingRangeFactor { std::vector Hs(2); const auto &keys = Factor::keys(); - const Vector error = unwhitenedError( - {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, - Hs); + const Vector error = this->unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, Hs); if (H1) *H1 = Hs[0]; if (H2) *H2 = Hs[1]; return error; diff --git a/gtsam/sam/sam.i b/gtsam/sam/sam.i index 90c319ede..a46a6de9e 100644 --- a/gtsam/sam/sam.i +++ b/gtsam/sam/sam.i @@ -108,5 +108,11 @@ typedef gtsam::BearingRangeFactor BearingRangeFactorPose2; +typedef gtsam::BearingRangeFactor + BearingRangeFactor3D; +typedef gtsam::BearingRangeFactor + BearingRangeFactorPose3; } // namespace gtsam diff --git a/gtsam/sfm/SfmData.h b/gtsam/sfm/SfmData.h index afce12205..430e107ad 100644 --- a/gtsam/sfm/SfmData.h +++ b/gtsam/sfm/SfmData.h @@ -77,10 +77,14 @@ struct GTSAM_EXPORT SfmData { size_t numberCameras() const { return cameras.size(); } /// The track formed by series of landmark measurements - SfmTrack track(size_t idx) const { return 
tracks[idx]; } + const SfmTrack& track(size_t idx) const { return tracks[idx]; } /// The camera pose at frame index `idx` - SfmCamera camera(size_t idx) const { return cameras[idx]; } + const SfmCamera& camera(size_t idx) const { return cameras[idx]; } + + /// Getters + const std::vector& cameraList() const { return cameras; } + const std::vector& trackList() const { return tracks; } /** * @brief Create projection factors using keys i and P(j) diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp index 2e81c2d56..c7ef2e526 100644 --- a/gtsam/sfm/TranslationRecovery.cpp +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -21,13 +21,16 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include +#include #include #include @@ -38,16 +41,13 @@ using namespace std; // In Wrappers we have no access to this so have a default ready. static std::mt19937 kRandomNumberGenerator(42); -TranslationRecovery::TranslationRecovery( - const TranslationRecovery::TranslationEdges &relativeTranslations, - const LevenbergMarquardtParams &lmParams) - : params_(lmParams) { - // Some relative translations may be zero. We treat nodes that have a zero - // relativeTranslation as a single node. - - // A DSFMap is used to find sets of nodes that have a zero relative - // translation. Add the nodes in each edge to the DSFMap, and merge nodes that - // are connected by a zero relative translation. +// Some relative translations may be zero. We treat nodes that have a zero +// relativeTranslation as a single node. +// A DSFMap is used to find sets of nodes that have a zero relative +// translation. Add the nodes in each edge to the DSFMap, and merge nodes that +// are connected by a zero relative translation. 
+DSFMap getSameTranslationDSFMap( + const std::vector> &relativeTranslations) { DSFMap sameTranslationDSF; for (const auto &edge : relativeTranslations) { Key key1 = sameTranslationDSF.find(edge.key1()); @@ -56,94 +56,161 @@ TranslationRecovery::TranslationRecovery( sameTranslationDSF.merge(key1, key2); } } - // Use only those edges for which two keys have a distinct root in the DSFMap. - for (const auto &edge : relativeTranslations) { - Key key1 = sameTranslationDSF.find(edge.key1()); - Key key2 = sameTranslationDSF.find(edge.key2()); - if (key1 == key2) continue; - relativeTranslations_.emplace_back(key1, key2, edge.measured(), - edge.noiseModel()); - } - // Store the DSF map for post-processing results. - sameTranslationNodes_ = sameTranslationDSF.sets(); + return sameTranslationDSF; } -NonlinearFactorGraph TranslationRecovery::buildGraph() const { +// Removes zero-translation edges from measurements, and combines the nodes in +// these edges into a single node. +template +std::vector> removeSameTranslationNodes( + const std::vector> &edges, + const DSFMap &sameTranslationDSFMap) { + std::vector> newEdges; + for (const auto &edge : edges) { + Key key1 = sameTranslationDSFMap.find(edge.key1()); + Key key2 = sameTranslationDSFMap.find(edge.key2()); + if (key1 == key2) continue; + newEdges.emplace_back(key1, key2, edge.measured(), edge.noiseModel()); + } + return newEdges; +} + +// Adds nodes that were not optimized for because they were connected +// to another node with a zero-translation edge in the input. +Values addSameTranslationNodes(const Values &result, + const DSFMap &sameTranslationDSFMap) { + Values final_result = result; + // Nodes that were not optimized are stored in sameTranslationNodes_ as a map + // from a key that was optimized to keys that were not optimized. Iterate over + // map and add results for keys not optimized. 
+ for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) { + Key optimizedKey = optimizedAndDuplicateKeys.first; + std::set duplicateKeys = optimizedAndDuplicateKeys.second; + // Add the result for the duplicate key if it does not already exist. + for (const Key duplicateKey : duplicateKeys) { + if (final_result.exists(duplicateKey)) continue; + final_result.insert(duplicateKey, + final_result.at(optimizedKey)); + } + } + return final_result; +} + +NonlinearFactorGraph TranslationRecovery::buildGraph( + const std::vector> &relativeTranslations) const { NonlinearFactorGraph graph; - // Add all relative translation edges - for (auto edge : relativeTranslations_) { + // Add translation factors for input translation directions. + for (auto edge : relativeTranslations) { graph.emplace_shared(edge.key1(), edge.key2(), edge.measured(), edge.noiseModel()); } - return graph; } void TranslationRecovery::addPrior( - const double scale, NonlinearFactorGraph *graph, + const std::vector> &relativeTranslations, + const double scale, + const std::vector> &betweenTranslations, + NonlinearFactorGraph *graph, const SharedNoiseModel &priorNoiseModel) const { - auto edge = relativeTranslations_.begin(); - if (edge == relativeTranslations_.end()) return; - graph->emplace_shared >(edge->key1(), Point3(0, 0, 0), - priorNoiseModel); - graph->emplace_shared >( - edge->key2(), scale * edge->measured().point3(), edge->noiseModel()); + auto edge = relativeTranslations.begin(); + if (edge == relativeTranslations.end()) return; + graph->emplace_shared>(edge->key1(), Point3(0, 0, 0), + priorNoiseModel); + + // Add a scale prior only if no other between factors were added. + if (betweenTranslations.empty()) { + graph->emplace_shared>( + edge->key2(), scale * edge->measured().point3(), edge->noiseModel()); + return; + } + + // Add between factors for optional relative translations. 
+ for (auto prior_edge : betweenTranslations) { + graph->emplace_shared>( + prior_edge.key1(), prior_edge.key2(), prior_edge.measured(), + prior_edge.noiseModel()); + } } -Values TranslationRecovery::initializeRandomly(std::mt19937 *rng) const { +Values TranslationRecovery::initializeRandomly( + const std::vector> &relativeTranslations, + const std::vector> &betweenTranslations, + std::mt19937 *rng, const Values &initialValues) const { uniform_real_distribution randomVal(-1, 1); // Create a lambda expression that checks whether value exists and randomly // initializes if not. Values initial; auto insert = [&](Key j) { - if (!initial.exists(j)) { + if (initial.exists(j)) return; + if (initialValues.exists(j)) { + initial.insert(j, initialValues.at(j)); + } else { initial.insert( j, Point3(randomVal(*rng), randomVal(*rng), randomVal(*rng))); } + // Assumes all nodes connected by zero-edges have the same initialization. }; // Loop over measurements and add a random translation - for (auto edge : relativeTranslations_) { + for (auto edge : relativeTranslations) { insert(edge.key1()); insert(edge.key2()); } - - // If there are no valid edges, but zero-distance edges exist, initialize one - // of the nodes in a connected component of zero-distance edges. - if (initial.empty() && !sameTranslationNodes_.empty()) { - for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) { - Key optimizedKey = optimizedAndDuplicateKeys.first; - initial.insert(optimizedKey, Point3(0, 0, 0)); - } + // There may be nodes in betweenTranslations that do not have a measurement. 
+ for (auto edge : betweenTranslations) { + insert(edge.key1()); + insert(edge.key2()); } return initial; } -Values TranslationRecovery::initializeRandomly() const { - return initializeRandomly(&kRandomNumberGenerator); +Values TranslationRecovery::initializeRandomly( + const std::vector> &relativeTranslations, + const std::vector> &betweenTranslations, + const Values &initialValues) const { + return initializeRandomly(relativeTranslations, betweenTranslations, + &kRandomNumberGenerator, initialValues); } -Values TranslationRecovery::run(const double scale) const { - auto graph = buildGraph(); - addPrior(scale, &graph); - const Values initial = initializeRandomly(); - LevenbergMarquardtOptimizer lm(graph, initial, params_); - Values result = lm.optimize(); +Values TranslationRecovery::run( + const TranslationEdges &relativeTranslations, const double scale, + const std::vector> &betweenTranslations, + const Values &initialValues) const { + // Find edges that have a zero-translation, and recompute relativeTranslations + // and betweenTranslations by retaining only one node for every zero-edge. + DSFMap sameTranslationDSFMap = + getSameTranslationDSFMap(relativeTranslations); + const TranslationEdges nonzeroRelativeTranslations = + removeSameTranslationNodes(relativeTranslations, sameTranslationDSFMap); + const std::vector> nonzeroBetweenTranslations = + removeSameTranslationNodes(betweenTranslations, sameTranslationDSFMap); - // Nodes that were not optimized are stored in sameTranslationNodes_ as a map - // from a key that was optimized to keys that were not optimized. Iterate over - // map and add results for keys not optimized. - for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) { - Key optimizedKey = optimizedAndDuplicateKeys.first; - std::set duplicateKeys = optimizedAndDuplicateKeys.second; - // Add the result for the duplicate key if it does not already exist. 
- for (const Key duplicateKey : duplicateKeys) { - if (result.exists(duplicateKey)) continue; - result.insert(duplicateKey, result.at(optimizedKey)); + // Create graph of translation factors. + NonlinearFactorGraph graph = buildGraph(nonzeroRelativeTranslations); + + // Add global frame prior and scale (either from betweenTranslations or + // scale). + addPrior(nonzeroRelativeTranslations, scale, nonzeroBetweenTranslations, + &graph); + + // Uses initial values from params if provided. + Values initial = initializeRandomly( + nonzeroRelativeTranslations, nonzeroBetweenTranslations, initialValues); + + // If there are no valid edges, but zero-distance edges exist, initialize one + // of the nodes in a connected component of zero-distance edges. + if (initial.empty() && !sameTranslationDSFMap.sets().empty()) { + for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) { + Key optimizedKey = optimizedAndDuplicateKeys.first; + initial.insert(optimizedKey, Point3(0, 0, 0)); } } - return result; + + LevenbergMarquardtOptimizer lm(graph, initial, lmParams_); + Values result = lm.optimize(); + return addSameTranslationNodes(result, sameTranslationDSFMap); } TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index 30c9a14e3..44a5ef43e 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -11,7 +11,7 @@ /** * @file TranslationRecovery.h - * @author Frank Dellaert + * @author Frank Dellaert, Akshay Krishnan * @date March 2020 * @brief Recovering translations in an epipolar graph when rotations are given. */ @@ -57,68 +57,109 @@ class TranslationRecovery { // Translation directions between camera pairs. TranslationEdges relativeTranslations_; - // Parameters used by the LM Optimizer. - LevenbergMarquardtParams params_; - - // Map from a key in the graph to a set of keys that share the same - // translation. 
- std::map> sameTranslationNodes_; + // Parameters. + LevenbergMarquardtParams lmParams_; public: /** * @brief Construct a new Translation Recovery object * - * @param relativeTranslations the relative translations, in world coordinate - * frames, vector of BinaryMeasurements of Unit3, where each key of a - * measurement is a point in 3D. - * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be - * used to modify the parameters for the LM optimizer. By default, uses the - * default LM parameters. + * @param lmParams parameters for optimization. */ - TranslationRecovery( - const TranslationEdges &relativeTranslations, - const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams()); + TranslationRecovery(const LevenbergMarquardtParams &lmParams) + : lmParams_(lmParams) {} + + /** + * @brief Default constructor. + */ + TranslationRecovery() = default; /** * @brief Build the factor graph to do the optimization. * + * @param relativeTranslations unit translation directions between + * translations to be estimated * @return NonlinearFactorGraph */ - NonlinearFactorGraph buildGraph() const; + NonlinearFactorGraph buildGraph( + const std::vector> &relativeTranslations) const; /** - * @brief Add priors on ednpoints of first measurement edge. + * @brief Add 3 factors to the graph: + * - A prior on the first point to lie at (0, 0, 0) + * - If betweenTranslations is non-empty, between factors provided by it. + * - If betweenTranslations is empty, a prior on scale of the first + * relativeTranslations edge. * + * @param relativeTranslations unit translation directions between + * translations to be estimated * @param scale scale for first relative translation which fixes gauge. * @param graph factor graph to which prior is added. + * @param betweenTranslations relative translations (with scale) between 2 + * points in world coordinate frame known a priori. * @param priorNoiseModel the noise model to use with the prior. 
*/ - void addPrior(const double scale, NonlinearFactorGraph *graph, - const SharedNoiseModel &priorNoiseModel = - noiseModel::Isotropic::Sigma(3, 0.01)) const; + void addPrior( + const std::vector> &relativeTranslations, + const double scale, + const std::vector> &betweenTranslations, + NonlinearFactorGraph *graph, + const SharedNoiseModel &priorNoiseModel = + noiseModel::Isotropic::Sigma(3, 0.01)) const; /** * @brief Create random initial translations. * + * @param relativeTranslations unit translation directions between + * translations to be estimated + * @param betweenTranslations relative translations (with scale) between 2 + * points in world coordinate frame known a priori. * @param rng random number generator + * @param initialValues (optional) initial values from a prior * @return Values */ - Values initializeRandomly(std::mt19937 *rng) const; + Values initializeRandomly( + const std::vector> &relativeTranslations, + const std::vector> &betweenTranslations, + std::mt19937 *rng, const Values &initialValues = Values()) const; /** * @brief Version of initializeRandomly with a fixed seed. * + * @param relativeTranslations unit translation directions between + * translations to be estimated + * @param betweenTranslations relative translations (with scale) between 2 + * points in world coordinate frame known a priori. + * @param initialValues (optional) initial values from a prior + * @return Values */ - Values initializeRandomly() const; + Values initializeRandomly( + const std::vector> &relativeTranslations, + const std::vector> &betweenTranslations, + const Values &initialValues = Values()) const; /** * @brief Build and optimize factor graph. * + * @param relativeTranslations the relative translations, in world coordinate + * frames, vector of BinaryMeasurements of Unit3, where each key of a 
If a relative translation magnitude is zero, + * it is treated as a hard same-point constraint (the result of all nodes + * connected by a zero-magnitude edge will be the same). * @param scale scale for first relative translation which fixes gauge. + * The scale is only used if betweenTranslations is empty. + * @param betweenTranslations relative translations (with scale) between 2 + * points in world coordinate frame known a priori. Unlike + * relativeTranslations, zero-magnitude betweenTranslations are not treated as + * hard constraints. + * @param initialValues initial values for optimization. Initializes randomly + * if not provided. * @return Values */ - Values run(const double scale = 1.0) const; + Values run( + const TranslationEdges &relativeTranslations, const double scale = 1.0, + const std::vector> &betweenTranslations = {}, + const Values &initialValues = Values()) const; /** * @brief Simulate translation direction measurements diff --git a/gtsam/sfm/sfm.i b/gtsam/sfm/sfm.i index bf9a73ac5..83bd07b13 100644 --- a/gtsam/sfm/sfm.i +++ b/gtsam/sfm/sfm.i @@ -4,11 +4,15 @@ namespace gtsam { +#include +#include #include class SfmTrack { SfmTrack(); SfmTrack(const gtsam::Point3& pt); const Point3& point3() const; + + Point3 p; double r; double g; @@ -34,12 +38,15 @@ class SfmData { static gtsam::SfmData FromBundlerFile(string filename); static gtsam::SfmData FromBalFile(string filename); + std::vector& trackList() const; + std::vector>& cameraList() const; + void addTrack(const gtsam::SfmTrack& t); void addCamera(const gtsam::SfmCamera& cam); size_t numberTracks() const; size_t numberCameras() const; - gtsam::SfmTrack track(size_t idx) const; - gtsam::PinholeCamera camera(size_t idx) const; + gtsam::SfmTrack& track(size_t idx) const; + gtsam::PinholeCamera& camera(size_t idx) const; gtsam::NonlinearFactorGraph generalSfmFactors( const gtsam::SharedNoiseModel& model = @@ -83,6 +90,7 @@ class BinaryMeasurement { typedef gtsam::BinaryMeasurement 
BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; +typedef gtsam::BinaryMeasurement BinaryMeasurementPoint3; class BinaryMeasurementsUnit3 { BinaryMeasurementsUnit3(); @@ -91,6 +99,20 @@ class BinaryMeasurementsUnit3 { void push_back(const gtsam::BinaryMeasurement& measurement); }; +class BinaryMeasurementsPoint3 { + BinaryMeasurementsPoint3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + +class BinaryMeasurementsRot3 { + BinaryMeasurementsRot3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + #include // TODO(frank): copy/pasta below until we have integer template paremeters in @@ -142,8 +164,8 @@ class ShonanAveraging2 { ShonanAveraging2(string g2oFile); ShonanAveraging2(string g2oFile, const gtsam::ShonanAveragingParameters2& parameters); - ShonanAveraging2(const gtsam::BetweenFactorPose2s &factors, - const gtsam::ShonanAveragingParameters2 ¶meters); + ShonanAveraging2(const gtsam::BetweenFactorPose2s& factors, + const gtsam::ShonanAveragingParameters2& parameters); // Query properties size_t nrUnknowns() const; @@ -184,6 +206,10 @@ class ShonanAveraging2 { }; class ShonanAveraging3 { + ShonanAveraging3( + const std::vector>& measurements, + const gtsam::ShonanAveragingParameters3& parameters = + gtsam::ShonanAveragingParameters3()); ShonanAveraging3(string g2oFile); ShonanAveraging3(string g2oFile, const gtsam::ShonanAveragingParameters3& parameters); @@ -252,15 +278,36 @@ class MFAS { }; #include + class TranslationRecovery { - TranslationRecovery( + TranslationRecovery(const gtsam::LevenbergMarquardtParams& lmParams); + TranslationRecovery(); // default params. 
+ void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const double scale, + const gtsam::BinaryMeasurementsPoint3& betweenTranslations, + gtsam::NonlinearFactorGraph @graph, + const gtsam::SharedNoiseModel& priorNoiseModel) const; + void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const double scale, + const gtsam::BinaryMeasurementsPoint3& betweenTranslations, + gtsam::NonlinearFactorGraph @graph) const; + gtsam::NonlinearFactorGraph buildGraph( + const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const; + gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const double scale, + const gtsam::BinaryMeasurementsPoint3& betweenTranslations, + const gtsam::Values& initialValues) const; + // default random initial values + gtsam::Values run( const gtsam::BinaryMeasurementsUnit3& relativeTranslations, - const gtsam::LevenbergMarquardtParams& lmParams); - TranslationRecovery( - const gtsam::BinaryMeasurementsUnit3& - relativeTranslations); // default LevenbergMarquardtParams - gtsam::Values run(const double scale) const; - gtsam::Values run() const; // default scale = 1.0 + const double scale, + const gtsam::BinaryMeasurementsPoint3& betweenTranslations) const; + // default empty betweenTranslations + gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const double scale) const; + // default scale = 1.0, empty betweenTranslations + gtsam::Values run( + const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const; }; } // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index dc450a9f7..95e750674 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -223,6 +223,8 @@ parse3DFactors(const std::string &filename, size_t maxIndex = 0); using BinaryMeasurementsUnit3 = std::vector>; +using BinaryMeasurementsPoint3 = std::vector>; +using BinaryMeasurementsRot3 = std::vector>; #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 inline 
boost::optional GTSAM_DEPRECATED diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i index 4e943253e..8e1e06d5b 100644 --- a/gtsam/slam/slam.i +++ b/gtsam/slam/slam.i @@ -90,6 +90,22 @@ typedef gtsam::GeneralSFMFactor, gtsam::Point3> GeneralSFMFactorCal3Unified; +typedef gtsam::GeneralSFMFactor, + gtsam::Point3> + GeneralSFMFactorPoseCal3_S2; +typedef gtsam::GeneralSFMFactor, + gtsam::Point3> + GeneralSFMFactorPoseCal3DS2; +typedef gtsam::GeneralSFMFactor, + gtsam::Point3> + GeneralSFMFactorPoseCal3Bundler; +typedef gtsam::GeneralSFMFactor, + gtsam::Point3> + GeneralSFMFactorPoseCal3Fisheye; +typedef gtsam::GeneralSFMFactor, + gtsam::Point3> + GeneralSFMFactorPoseCal3Unified; + template virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor { diff --git a/gtsam/slam/tests/testPriorFactor.cpp b/gtsam/slam/tests/testPriorFactor.cpp index 2dc083cb2..d1a60e346 100644 --- a/gtsam/slam/tests/testPriorFactor.cpp +++ b/gtsam/slam/tests/testPriorFactor.cpp @@ -5,12 +5,16 @@ * @date Nov 4, 2014 */ -#include -#include #include +#include +#include +#include +#include using namespace std; +using namespace std::placeholders; using namespace gtsam; +using namespace imuBias; /* ************************************************************************* */ @@ -23,16 +27,44 @@ TEST(PriorFactor, ConstructorScalar) { // Constructor vector3 TEST(PriorFactor, ConstructorVector3) { SharedNoiseModel model = noiseModel::Isotropic::Sigma(3, 1.0); - PriorFactor factor(1, Vector3(1,2,3), model); + PriorFactor factor(1, Vector3(1, 2, 3), model); } // Constructor dynamic sized vector TEST(PriorFactor, ConstructorDynamicSizeVector) { - Vector v(5); v << 1, 2, 3, 4, 5; + Vector v(5); + v << 1, 2, 3, 4, 5; SharedNoiseModel model = noiseModel::Isotropic::Sigma(5, 1.0); PriorFactor factor(1, v, model); } +Vector callEvaluateError(const PriorFactor& factor, + const ConstantBias& bias) { + return factor.evaluateError(bias); +} + +// Test for imuBias::ConstantBias +TEST(PriorFactor, ConstantBias) { + 
Vector3 biasAcc(1, 2, 3); + Vector3 biasGyro(0.1, 0.2, 0.3); + ConstantBias bias(biasAcc, biasGyro); + + PriorFactor factor(1, bias, + noiseModel::Isotropic::Sigma(6, 0.1)); + Values values; + values.insert(1, bias); + + EXPECT_DOUBLES_EQUAL(0.0, factor.error(values), 1e-8); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + + ConstantBias incorrectBias( + (Vector6() << 1.1, 2.1, 3.1, 0.2, 0.3, 0.4).finished()); + values.clear(); + values.insert(1, incorrectBias); + EXPECT_DOUBLES_EQUAL(3.0, factor.error(values), 1e-8); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/symbolic/tests/testSymbolicBayesNet.cpp b/gtsam/symbolic/tests/testSymbolicBayesNet.cpp index 2e13be10e..7795d5b89 100644 --- a/gtsam/symbolic/tests/testSymbolicBayesNet.cpp +++ b/gtsam/symbolic/tests/testSymbolicBayesNet.cpp @@ -104,16 +104,16 @@ TEST(SymbolicBayesNet, Dot) { "digraph {\n" " size=\"5,5\";\n" "\n" - " vara1[label=\"a1\", pos=\"1,2!\", shape=box];\n" - " vara2[label=\"a2\", pos=\"2,2!\", shape=box];\n" - " varx1[label=\"x1\", pos=\"1,1!\"];\n" - " varx2[label=\"x2\", pos=\"2,1!\"];\n" - " varx3[label=\"x3\", pos=\"3,1!\"];\n" + " var6989586621679009793[label=\"a1\", pos=\"1,2!\", shape=box];\n" + " var6989586621679009794[label=\"a2\", pos=\"2,2!\", shape=box];\n" + " var8646911284551352321[label=\"x1\", pos=\"1,1!\"];\n" + " var8646911284551352322[label=\"x2\", pos=\"2,1!\"];\n" + " var8646911284551352323[label=\"x3\", pos=\"3,1!\"];\n" "\n" - " varx1->varx2\n" - " vara1->varx2\n" - " varx2->varx3\n" - " vara2->varx3\n" + " var8646911284551352321->var8646911284551352322\n" + " var6989586621679009793->var8646911284551352322\n" + " var8646911284551352322->var8646911284551352323\n" + " var6989586621679009794->var8646911284551352323\n" "}"); } diff --git a/gtsam_unstable/slam/PoseToPointFactor.h 
b/gtsam_unstable/slam/PoseToPointFactor.h index cab48e506..b9b2ad5ce 100644 --- a/gtsam_unstable/slam/PoseToPointFactor.h +++ b/gtsam_unstable/slam/PoseToPointFactor.h @@ -61,6 +61,12 @@ class PoseToPointFactor : public NoiseModelFactor2 { traits::Equals(this->measured_, e->measured_, tol); } + /// @return a deep copy of this factor + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new This(*this))); + } + /** implement functions needed to derive from Factor */ /** vector of errors diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp index e932b6400..74dd04a78 100644 --- a/gtsam_unstable/timing/timeShonanAveraging.cpp +++ b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -79,9 +79,9 @@ void saveG2oResult(string name, const Values& values, std::map poses myfile << "VERTEX_SE3:QUAT" << " "; myfile << i << " "; myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; - Vector quaternion = Rot3(R).quaternion(); - myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) - << " " << quaternion(0); + Quaternion quaternion = Rot3(R).toQuaternion(); + myfile << quaternion.x() << " " << quaternion.y() << " " << quaternion.z() + << " " << quaternion.w(); myfile << "\n"; } myfile.close(); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index d3cbff32d..cba206d11 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -47,7 +47,9 @@ set(ignore gtsam::Pose3Vector gtsam::Rot3Vector gtsam::KeyVector + gtsam::BinaryMeasurementsPoint3 gtsam::BinaryMeasurementsUnit3 + gtsam::BinaryMeasurementsRot3 gtsam::DiscreteKey gtsam::KeyPairDoubleMap) @@ -65,6 +67,7 @@ set(interface_headers ${PROJECT_SOURCE_DIR}/gtsam/sfm/sfm.i ${PROJECT_SOURCE_DIR}/gtsam/navigation/navigation.i ${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i + ${PROJECT_SOURCE_DIR}/gtsam/hybrid/hybrid.i ) set(GTSAM_PYTHON_TARGET gtsam_py) 
@@ -107,6 +110,14 @@ file(GLOB GTSAM_PYTHON_UTIL_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/utils/*.py" foreach(util_file ${GTSAM_PYTHON_UTIL_FILES}) configure_file(${util_file} "${GTSAM_MODULE_PATH}/utils/${test_file}" COPYONLY) endforeach() +file(GLOB GTSAM_PYTHON_PREAMBLE_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/preamble/*.h") +foreach(util_file ${GTSAM_PYTHON_PREAMBLE_FILES}) + configure_file(${util_file} "${GTSAM_MODULE_PATH}/preamble/${test_file}" COPYONLY) +endforeach() +file(GLOB GTSAM_PYTHON_SPECIALIZATION_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/specializations/*.h") +foreach(util_file ${GTSAM_PYTHON_SPECIALIZATION_FILES}) + configure_file(${util_file} "${GTSAM_MODULE_PATH}/specializations/${test_file}" COPYONLY) +endforeach() # Common directory for data/datasets stored with the package. # This will store the data in the Python site package directly. @@ -128,7 +139,9 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON) gtsam::Pose3Vector gtsam::KeyVector gtsam::FixedLagSmootherKeyTimestampMapValue + gtsam::BinaryMeasurementsPoint3 gtsam::BinaryMeasurementsUnit3 + gtsam::BinaryMeasurementsRot3 gtsam::CameraSetCal3_S2 gtsam::CameraSetCal3Bundler gtsam::CameraSetCal3Unified @@ -176,7 +189,7 @@ endif() # Add custom target so we can install with `make python-install` set(GTSAM_PYTHON_INSTALL_TARGET python-install) add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} - COMMAND ${PYTHON_EXECUTABLE} -m pip install --user . + COMMAND ${PYTHON_EXECUTABLE} -m pip install . DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) diff --git a/python/gtsam/examples/Pose3ISAM2Example.py b/python/gtsam/examples/Pose3ISAM2Example.py index cb71813c5..986bd5b9c 100644 --- a/python/gtsam/examples/Pose3ISAM2Example.py +++ b/python/gtsam/examples/Pose3ISAM2Example.py @@ -34,7 +34,10 @@ def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsa # Plot the newly updated iSAM2 inference. 
fig = plt.figure(0) - axes = fig.gca(projection='3d') + if not fig.axes: + axes = fig.add_subplot(projection='3d') + else: + axes = fig.axes[0] plt.cla() i = 1 diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py index 054b61126..92a7d04e3 100644 --- a/python/gtsam/examples/TranslationAveragingExample.py +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -123,7 +123,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, w_iZj_inliers = filter_outliers(w_iZj_list) # Run the optimizer to obtain translations for normalized directions. - wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() + wtc_values = gtsam.TranslationRecovery().run(w_iZj_inliers) wTc_values = gtsam.Values() for key in wRc_values.keys(): diff --git a/python/gtsam/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py index 4b480fab7..60d4fb376 100644 --- a/python/gtsam/examples/VisualISAM2Example.py +++ b/python/gtsam/examples/VisualISAM2Example.py @@ -33,7 +33,10 @@ def visual_ISAM2_plot(result): fignum = 0 fig = plt.figure(fignum) - axes = fig.gca(projection='3d') + if not fig.axes: + axes = fig.add_subplot(projection='3d') + else: + axes = fig.axes[0] plt.cla() # Plot points diff --git a/python/gtsam/notebooks/ellipses.ipynb b/python/gtsam/notebooks/ellipses.ipynb new file mode 100644 index 000000000..06938f696 --- /dev/null +++ b/python/gtsam/notebooks/ellipses.ipynb @@ -0,0 +1,133 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Ellipse Scaling\n", + "\n", + "The code to calculate the percentages included in ellipses with various values of \"k\" in `plot.py`.\n", + "\n", + "Thanks to @senselessDev, January 26, for providing the code in [PR #1067](https://github.com/borglab/gtsam/pull/1067)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import scipy\n", + "import scipy.stats\n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def pct_to_sigma(pct, dof):\n", + " return np.sqrt(scipy.stats.chi2.ppf(pct / 100., df=dof))\n", + "\n", + "def sigma_to_pct(sigma, dof):\n", + " return scipy.stats.chi2.cdf(sigma**2, df=dof) * 100." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0D\t 1 \t 2 \t 3 \t 4 \t 5 \n", + "1D\t68.26895%\t95.44997%\t99.73002%\t99.99367%\t99.99994%\n", + "2D\t39.34693%\t86.46647%\t98.88910%\t99.96645%\t99.99963%\n", + "3D\t19.87480%\t73.85359%\t97.07091%\t99.88660%\t99.99846%\n" + ] + } + ], + "source": [ + "for dof in range(0, 4):\n", + " print(\"{}D\".format(dof), end=\"\")\n", + " for sigma in range(1, 6):\n", + " if dof == 0: print(\"\\t {} \".format(sigma), end=\"\")\n", + " else: print(\"\\t{:.5f}%\".format(sigma_to_pct(sigma, dof)), end=\"\")\n", + " print()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1D\n", + "\n", + "pct=50.0 -> sigma=0.674489750196\n", + "pct=95.0 -> sigma=1.959963984540\n", + "pct=99.0 -> sigma=2.575829303549\n", + "\n", + "2D\n", + "\n", + "pct=50.0 -> sigma=1.177410022515\n", + "pct=95.0 -> sigma=2.447746830681\n", + "pct=99.0 -> sigma=3.034854258770\n", + "\n", + "3D\n", + "\n", + "pct=50.0 -> sigma=1.538172254455\n", + "pct=95.0 -> sigma=2.795483482915\n", + "pct=99.0 -> sigma=3.368214175219\n", + "\n" + ] + } + ], + "source": [ + "for dof in range(1, 4):\n", + " print(\"{}D\\n\".format(dof))\n", + " for pct in [50, 95, 99]:\n", + " print(\"pct={:.1f} -> sigma={:.12f}\".format(pct, pct_to_sigma(pct, dof)))\n", + " print()" + ] + }, + { 
+ "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "4d608302ba82e7596903db5446e6fa05f049271852e8cc6e1cafaafe5fbd9fed" + }, + "kernelspec": { + "display_name": "Python 3.8.13 ('gtsfm-v1')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/gtsam/preamble/base.h b/python/gtsam/preamble/base.h index 626b47ae4..5cf633e65 100644 --- a/python/gtsam/preamble/base.h +++ b/python/gtsam/preamble/base.h @@ -11,6 +11,8 @@ * mutations on Python side will not be reflected on C++. */ -PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(gtsam::IndexPairVector); + +PYBIND11_MAKE_OPAQUE(gtsam::IndexPairSet); PYBIND11_MAKE_OPAQUE(std::vector); // JacobianVector diff --git a/python/gtsam/preamble/discrete.h b/python/gtsam/preamble/discrete.h index 608508c32..320e0ac71 100644 --- a/python/gtsam/preamble/discrete.h +++ b/python/gtsam/preamble/discrete.h @@ -13,4 +13,3 @@ #include -PYBIND11_MAKE_OPAQUE(gtsam::DiscreteKeys); diff --git a/python/gtsam/preamble/hybrid.h b/python/gtsam/preamble/hybrid.h new file mode 100644 index 000000000..5e5a71e48 --- /dev/null +++ b/python/gtsam/preamble/hybrid.h @@ -0,0 +1,14 @@ +/* Please refer to: + * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html + * These are required to save one copy operation on Python calls. + * + * NOTES + * ================= + * + * `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11 + * automatic STL binding, such that the raw objects can be accessed in Python. 
+ * Without this they will be automatically converted to a Python object, and all + * mutations on Python side will not be reflected on C++. + */ + +PYBIND11_MAKE_OPAQUE(std::vector); diff --git a/python/gtsam/preamble/inference.h b/python/gtsam/preamble/inference.h index 4106c794a..d07a75f6f 100644 --- a/python/gtsam/preamble/inference.h +++ b/python/gtsam/preamble/inference.h @@ -10,5 +10,3 @@ * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. */ - -#include \ No newline at end of file diff --git a/python/gtsam/preamble/sfm.h b/python/gtsam/preamble/sfm.h index a34e73058..8ff0ea82e 100644 --- a/python/gtsam/preamble/sfm.h +++ b/python/gtsam/preamble/sfm.h @@ -9,4 +9,18 @@ * automatic STL binding, such that the raw objects can be accessed in Python. * Without this they will be automatically converted to a Python object, and all * mutations on Python side will not be reflected on C++. - */ \ No newline at end of file + */ + +// Including can cause some serious hard-to-debug bugs!!! 
+// #include +#include + +PYBIND11_MAKE_OPAQUE( + std::vector); + +PYBIND11_MAKE_OPAQUE( + std::vector); +PYBIND11_MAKE_OPAQUE( + std::vector>); +PYBIND11_MAKE_OPAQUE( + std::vector>); diff --git a/python/gtsam/specializations/geometry.h b/python/gtsam/specializations/geometry.h index 5a0ea35ef..99f40253f 100644 --- a/python/gtsam/specializations/geometry.h +++ b/python/gtsam/specializations/geometry.h @@ -21,6 +21,8 @@ py::bind_vector>(m_, "Pose3Pairs"); py::bind_vector>(m_, "Pose3Vector"); py::bind_vector>>( m_, "CameraSetCal3_S2"); +py::bind_vector>>( + m_, "CameraSetCal3DS2"); py::bind_vector>>( m_, "CameraSetCal3Bundler"); py::bind_vector>>( diff --git a/python/gtsam/specializations/hybrid.h b/python/gtsam/specializations/hybrid.h new file mode 100644 index 000000000..bede6d86c --- /dev/null +++ b/python/gtsam/specializations/hybrid.h @@ -0,0 +1,4 @@ + +py::bind_vector >(m_, "GaussianFactorVector"); + +py::implicitly_convertible >(); diff --git a/python/gtsam/specializations/inference.h b/python/gtsam/specializations/inference.h index 22fe3beff..9e23444ea 100644 --- a/python/gtsam/specializations/inference.h +++ b/python/gtsam/specializations/inference.h @@ -11,3 +11,4 @@ * and saves one copy operation. */ +py::bind_map>(m_, "__MapCharDouble"); diff --git a/python/gtsam/specializations/sfm.h b/python/gtsam/specializations/sfm.h index 6de15217f..311b2c59b 100644 --- a/python/gtsam/specializations/sfm.h +++ b/python/gtsam/specializations/sfm.h @@ -11,6 +11,23 @@ * and saves one copy operation. 
*/ +py::bind_vector > >( + m_, "BinaryMeasurementsPoint3"); py::bind_vector > >( m_, "BinaryMeasurementsUnit3"); +py::bind_vector > >( + m_, "BinaryMeasurementsRot3"); py::bind_map(m_, "KeyPairDoubleMap"); + +py::bind_vector< + std::vector >( + m_, "SfmTracks"); + +py::bind_vector< + std::vector >( + m_, "SfmCameras"); + +py::bind_vector< + std::vector>>( + m_, "SfmMeasurementVector" + ); diff --git a/python/gtsam/tests/test_DiscreteBayesNet.py b/python/gtsam/tests/test_DiscreteBayesNet.py index 74191dcc7..ff2ba99d1 100644 --- a/python/gtsam/tests/test_DiscreteBayesNet.py +++ b/python/gtsam/tests/test_DiscreteBayesNet.py @@ -11,12 +11,12 @@ Author: Frank Dellaert # pylint: disable=no-name-in-module, invalid-name -import unittest import textwrap +import unittest import gtsam -from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteFactorGraph, - DiscreteKeys, DiscreteDistribution, DiscreteValues, Ordering) +from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteDistribution, + DiscreteFactorGraph, DiscreteKeys, DiscreteValues, Ordering) from gtsam.utils.test_case import GtsamTestCase # Some keys: @@ -139,7 +139,7 @@ class TestDiscreteBayesNet(GtsamTestCase): # Make sure we can *update* position hints writer = gtsam.DotWriter() ph: dict = writer.positionHints - ph.update({'a': 2}) # hint at symbol position + ph['a'] = 2 # hint at symbol position writer.positionHints = ph # Check the output of dot @@ -152,10 +152,10 @@ class TestDiscreteBayesNet(GtsamTestCase): var4[label="4"]; var5[label="5"]; var6[label="6"]; - vara0[label="a0", pos="0,2!"]; + var6989586621679009792[label="a0", pos="0,2!"]; var4->var6 - vara0->var3 + var6989586621679009792->var3 var3->var5 var6->var5 }""" diff --git a/python/gtsam/tests/test_Factors.py b/python/gtsam/tests/test_Factors.py index 3ec8648a4..a5688eec8 100644 --- a/python/gtsam/tests/test_Factors.py +++ b/python/gtsam/tests/test_Factors.py @@ -11,9 +11,8 @@ Author: Varun Agrawal """ import unittest -import numpy as 
np - import gtsam +import numpy as np from gtsam.utils.test_case import GtsamTestCase @@ -21,6 +20,7 @@ class TestNonlinearEquality2Factor(GtsamTestCase): """ Test various instantiations of NonlinearEquality2. """ + def test_point3(self): """Test for Point3 version.""" factor = gtsam.NonlinearEquality2Point3(0, 1) @@ -30,5 +30,23 @@ class TestNonlinearEquality2Factor(GtsamTestCase): np.testing.assert_allclose(error, np.zeros(3)) +class TestJacobianFactor(GtsamTestCase): + """Test JacobianFactor""" + + def test_gaussian_factor_graph(self): + """Test construction from GaussianFactorGraph.""" + gfg = gtsam.GaussianFactorGraph() + jf = gtsam.JacobianFactor(gfg) + self.assertIsInstance(jf, gtsam.JacobianFactor) + + nfg = gtsam.NonlinearFactorGraph() + nfg.push_back(gtsam.PriorFactorDouble(1, 0.0, gtsam.noiseModel.Isotropic.Sigma(1, 1.0))) + values = gtsam.Values() + values.insert(1, 0.0) + gfg = nfg.linearize(values) + jf = gtsam.JacobianFactor(gfg) + self.assertIsInstance(jf, gtsam.JacobianFactor) + + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/tests/test_HybridFactorGraph.py b/python/gtsam/tests/test_HybridFactorGraph.py new file mode 100644 index 000000000..781cfd924 --- /dev/null +++ b/python/gtsam/tests/test_HybridFactorGraph.py @@ -0,0 +1,60 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Hybrid Factor Graphs. 
+Author: Fan Jiang +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +from __future__ import print_function + +import unittest + +import gtsam +import numpy as np +from gtsam.symbol_shorthand import C, X +from gtsam.utils.test_case import GtsamTestCase + + +class TestHybridGaussianFactorGraph(GtsamTestCase): + """Unit tests for HybridGaussianFactorGraph.""" + + def test_create(self): + """Test construction of hybrid factor graph.""" + noiseModel = gtsam.noiseModel.Unit.Create(3) + dk = gtsam.DiscreteKeys() + dk.push_back((C(0), 2)) + + jf1 = gtsam.JacobianFactor(X(0), np.eye(3), np.zeros((3, 1)), + noiseModel) + jf2 = gtsam.JacobianFactor(X(0), np.eye(3), np.ones((3, 1)), + noiseModel) + + gmf = gtsam.GaussianMixtureFactor.FromFactors([X(0)], dk, [jf1, jf2]) + + hfg = gtsam.HybridGaussianFactorGraph() + hfg.add(jf1) + hfg.add(jf2) + hfg.push_back(gmf) + + hbn = hfg.eliminateSequential( + gtsam.Ordering.ColamdConstrainedLastHybridGaussianFactorGraph( + hfg, [C(0)])) + + # print("hbn = ", hbn) + self.assertEqual(hbn.size(), 2) + + mixture = hbn.at(0).inner() + self.assertIsInstance(mixture, gtsam.GaussianMixture) + self.assertEqual(len(mixture.keys()), 2) + + discrete_conditional = hbn.at(hbn.size() - 1).inner() + self.assertIsInstance(discrete_conditional, gtsam.DiscreteConditional) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py index e2561ae52..37afd9e1c 100644 --- a/python/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -15,12 +15,10 @@ from __future__ import print_function import unittest import gtsam -from gtsam import (DoglegOptimizer, DoglegParams, - DummyPreconditionerParameters, GaussNewtonOptimizer, - GaussNewtonParams, GncLMParams, GncLMOptimizer, - LevenbergMarquardtOptimizer, LevenbergMarquardtParams, - NonlinearFactorGraph, Ordering, - PCGSolverParameters, Point2, PriorFactorPoint2, 
Values) +from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, + GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer, + LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph, + Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase KEY1 = 1 @@ -28,63 +26,83 @@ KEY2 = 2 class TestScenario(GtsamTestCase): - def test_optimize(self): - """Do trivial test with three optimizer variants.""" - fg = NonlinearFactorGraph() + """Do trivial test with three optimizer variants.""" + + def setUp(self): + """Set up the optimization problem and ordering""" + # create graph + self.fg = NonlinearFactorGraph() model = gtsam.noiseModel.Unit.Create(2) - fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) + self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) # test error at minimum xstar = Point2(0, 0) - optimal_values = Values() - optimal_values.insert(KEY1, xstar) - self.assertEqual(0.0, fg.error(optimal_values), 0.0) + self.optimal_values = Values() + self.optimal_values.insert(KEY1, xstar) + self.assertEqual(0.0, self.fg.error(self.optimal_values), 0.0) # test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = x0 = Point2(3, 3) - initial_values = Values() - initial_values.insert(KEY1, x0) - self.assertEqual(9.0, fg.error(initial_values), 1e-3) + self.initial_values = Values() + self.initial_values.insert(KEY1, x0) + self.assertEqual(9.0, self.fg.error(self.initial_values), 1e-3) # optimize parameters - ordering = Ordering() - ordering.push_back(KEY1) + self.ordering = Ordering() + self.ordering.push_back(KEY1) - # Gauss-Newton + def test_gauss_newton(self): gnParams = GaussNewtonParams() - gnParams.setOrdering(ordering) - actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize() - self.assertAlmostEqual(0, fg.error(actual1)) + gnParams.setOrdering(self.ordering) + actual = GaussNewtonOptimizer(self.fg, self.initial_values, 
gnParams).optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) - # Levenberg-Marquardt + def test_levenberg_marquardt(self): lmParams = LevenbergMarquardtParams.CeresDefaults() - lmParams.setOrdering(ordering) - actual2 = LevenbergMarquardtOptimizer( - fg, initial_values, lmParams).optimize() - self.assertAlmostEqual(0, fg.error(actual2)) + lmParams.setOrdering(self.ordering) + actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) - # Levenberg-Marquardt + def test_levenberg_marquardt_pcg(self): lmParams = LevenbergMarquardtParams.CeresDefaults() lmParams.setLinearSolverType("ITERATIVE") cgParams = PCGSolverParameters() cgParams.setPreconditionerParams(DummyPreconditionerParameters()) lmParams.setIterativeParams(cgParams) - actual2 = LevenbergMarquardtOptimizer( - fg, initial_values, lmParams).optimize() - self.assertAlmostEqual(0, fg.error(actual2)) + actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) - # Dogleg + def test_dogleg(self): dlParams = DoglegParams() - dlParams.setOrdering(ordering) - actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize() - self.assertAlmostEqual(0, fg.error(actual3)) - - # Graduated Non-Convexity (GNC) - gncParams = GncLMParams() - actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize() - self.assertAlmostEqual(0, fg.error(actual4)) - + dlParams.setOrdering(self.ordering) + actual = DoglegOptimizer(self.fg, self.initial_values, dlParams).optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) + def test_graduated_non_convexity(self): + gncParams = GncLMParams() + actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) + + def test_iteration_hook(self): + # set up iteration hook to track some testable values + iteration_count = 0 + final_error = 0 + 
final_values = None + def iteration_hook(iter, error_before, error_after): + nonlocal iteration_count, final_error, final_values + iteration_count = iter + final_error = error_after + final_values = optimizer.values() + # optimize + params = LevenbergMarquardtParams.CeresDefaults() + params.setOrdering(self.ordering) + params.iterationHook = iteration_hook + optimizer = LevenbergMarquardtOptimizer(self.fg, self.initial_values, params) + actual = optimizer.optimize() + self.assertAlmostEqual(0, self.fg.error(actual)) + self.gtsamAssertEquals(final_values, actual) + self.assertEqual(self.fg.error(actual), final_error) + self.assertEqual(optimizer.iterations(), iteration_count) if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index cde71de53..4464e8d47 100644 --- a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -19,6 +19,62 @@ from gtsam import Point3, Pose3, Rot3, Point3Pairs from gtsam.utils.test_case import GtsamTestCase +def numerical_derivative_pose(pose, method, delta=1e-5): + jacobian = np.zeros((6, 6)) + for idx in range(6): + xplus = np.zeros(6) + xplus[idx] = delta + xminus = np.zeros(6) + xminus[idx] = -delta + pose_plus = pose.retract(xplus).__getattribute__(method)() + pose_minus = pose.retract(xminus).__getattribute__(method)() + jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta) + return jacobian + + +def numerical_derivative_2_poses(pose, other_pose, method, delta=1e-5, inputs=()): + jacobian = np.zeros((6, 6)) + other_jacobian = np.zeros((6, 6)) + for idx in range(6): + xplus = np.zeros(6) + xplus[idx] = delta + xminus = np.zeros(6) + xminus[idx] = -delta + + pose_plus = pose.retract(xplus).__getattribute__(method)(*inputs, other_pose) + pose_minus = pose.retract(xminus).__getattribute__(method)(*inputs, other_pose) + jacobian[:, idx] = pose_minus.localCoordinates(pose_plus) / (2 * delta) + + other_pose_plus = 
pose.__getattribute__(method)(*inputs, other_pose.retract(xplus)) + other_pose_minus = pose.__getattribute__(method)(*inputs, other_pose.retract(xminus)) + other_jacobian[:, idx] = other_pose_minus.localCoordinates(other_pose_plus) / (2 * delta) + return jacobian, other_jacobian + + + def numerical_derivative_pose_point(pose, point, method, delta=1e-5): + jacobian = np.zeros((3, 6)) + point_jacobian = np.zeros((3, 3)) + for idx in range(6): + xplus = np.zeros(6) + xplus[idx] = delta + xminus = np.zeros(6) + xminus[idx] = -delta + + point_plus = pose.retract(xplus).__getattribute__(method)(point) + point_minus = pose.retract(xminus).__getattribute__(method)(point) + jacobian[:, idx] = (point_plus - point_minus) / (2 * delta) + + if idx < 3: + xplus = np.zeros(3) + xplus[idx] = delta + xminus = np.zeros(3) + xminus[idx] = -delta + point_plus = pose.__getattribute__(method)(point + xplus) + point_minus = pose.__getattribute__(method)(point + xminus) + point_jacobian[:, idx] = (point_plus - point_minus) / (2 * delta) + return jacobian, point_jacobian + + class TestPose3(GtsamTestCase): """Test selected Pose3 methods.""" @@ -30,6 +86,47 @@ class TestPose3(GtsamTestCase): actual = T2.between(T3) self.gtsamAssertEquals(actual, expected, 1e-6) + #test jacobians + jacobian = np.zeros((6, 6), order='F') + jacobian_other = np.zeros((6, 6), order='F') + T2.between(T3, jacobian, jacobian_other) + jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(T2, T3, 'between') + self.gtsamAssertEquals(jacobian, jacobian_numerical) + self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other) + + def test_inverse(self): + """Test inverse method.""" + pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0)) + expected = Pose3(Rot3.Rodrigues(0, 0, math.pi/2), Point3(4, -2, 0)) + actual = pose.inverse() + self.gtsamAssertEquals(actual, expected, 1e-6) + + #test jacobians + jacobian = np.zeros((6, 6), order='F') + pose.inverse(jacobian) + jacobian_numerical = 
numerical_derivative_pose(pose, 'inverse') + self.gtsamAssertEquals(jacobian, jacobian_numerical) + + def test_slerp(self): + """Test slerp method.""" + pose0 = gtsam.Pose3() + pose1 = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0)) + actual_alpha_0 = pose0.slerp(0, pose1) + self.gtsamAssertEquals(actual_alpha_0, pose0) + actual_alpha_1 = pose0.slerp(1, pose1) + self.gtsamAssertEquals(actual_alpha_1, pose1) + actual_alpha_half = pose0.slerp(0.5, pose1) + expected_alpha_half = Pose3(Rot3.Rodrigues(0, 0, -math.pi/4), Point3(0.17157288, 2.41421356, 0)) + self.gtsamAssertEquals(actual_alpha_half, expected_alpha_half, tol=1e-6) + + # test jacobians + jacobian = np.zeros((6, 6), order='F') + jacobian_other = np.zeros((6, 6), order='F') + pose0.slerp(0.5, pose1, jacobian, jacobian_other) + jacobian_numerical, jacobian_numerical_other = numerical_derivative_2_poses(pose0, pose1, 'slerp', inputs=[0.5]) + self.gtsamAssertEquals(jacobian, jacobian_numerical) + self.gtsamAssertEquals(jacobian_other, jacobian_numerical_other) + def test_transformTo(self): """Test transformTo method.""" pose = Pose3(Rot3.Rodrigues(0, 0, -math.pi/2), Point3(2, 4, 0)) @@ -37,6 +134,15 @@ class TestPose3(GtsamTestCase): expected = Point3(2, 1, 10) self.gtsamAssertEquals(actual, expected, 1e-6) + #test jacobians + point = Point3(3, 2, 10) + jacobian_pose = np.zeros((3, 6), order='F') + jacobian_point = np.zeros((3, 3), order='F') + pose.transformTo(point, jacobian_pose, jacobian_point) + jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformTo') + self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose) + self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point) + # multi-point version points = np.stack([Point3(3, 2, 10), Point3(3, 2, 10)]).T actual_array = pose.transformTo(points) @@ -51,6 +157,15 @@ class TestPose3(GtsamTestCase): expected = Point3(3, 2, 10) self.gtsamAssertEquals(actual, expected, 1e-6) + #test jacobians + 
point = Point3(3, 2, 10) + jacobian_pose = np.zeros((3, 6), order='F') + jacobian_point = np.zeros((3, 3), order='F') + pose.transformFrom(point, jacobian_pose, jacobian_point) + jacobian_numerical_pose, jacobian_numerical_point = numerical_derivative_pose_point(pose, point, 'transformFrom') + self.gtsamAssertEquals(jacobian_pose, jacobian_numerical_pose) + self.gtsamAssertEquals(jacobian_point, jacobian_numerical_point) + # multi-point version points = np.stack([Point3(2, 1, 10), Point3(2, 1, 10)]).T actual_array = pose.transformFrom(points) @@ -122,4 +237,4 @@ class TestPose3(GtsamTestCase): if __name__ == "__main__": - unittest.main() + unittest.main() \ No newline at end of file diff --git a/python/gtsam/tests/test_TranslationRecovery.py b/python/gtsam/tests/test_TranslationRecovery.py index 0fb0518b6..99fbce89e 100644 --- a/python/gtsam/tests/test_TranslationRecovery.py +++ b/python/gtsam/tests/test_TranslationRecovery.py @@ -34,8 +34,10 @@ class TestTranslationRecovery(unittest.TestCase): def test_constructor(self): """Construct from binary measurements.""" - algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3()) + algorithm = gtsam.TranslationRecovery() self.assertIsInstance(algorithm, gtsam.TranslationRecovery) + algorithm_params = gtsam.TranslationRecovery(gtsam.LevenbergMarquardtParams()) + self.assertIsInstance(algorithm_params, gtsam.TranslationRecovery) def test_run(self): gt_poses = ExampleValues() @@ -45,9 +47,9 @@ class TestTranslationRecovery(unittest.TestCase): lmParams = gtsam.LevenbergMarquardtParams() lmParams.setVerbosityLM("silent") - algorithm = gtsam.TranslationRecovery(measurements, lmParams) + algorithm = gtsam.TranslationRecovery(lmParams) scale = 2.0 - result = algorithm.run(scale) + result = algorithm.run(measurements, scale) w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation() w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation() diff --git 
a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py index 0a258a0af..8630e1da7 100644 --- a/python/gtsam/tests/test_Triangulation.py +++ b/python/gtsam/tests/test_Triangulation.py @@ -8,26 +8,17 @@ See LICENSE for the license information Test Triangulation Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert """ +# pylint: disable=no-name-in-module, invalid-name, no-member import unittest from typing import Iterable, List, Optional, Tuple, Union - import numpy as np import gtsam -from gtsam import ( - Cal3_S2, - Cal3Bundler, - CameraSetCal3_S2, - CameraSetCal3Bundler, - PinholeCameraCal3_S2, - PinholeCameraCal3Bundler, - Point2, - Point2Vector, - Point3, - Pose3, - Pose3Vector, - Rot3, -) +from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2, + CameraSetCal3Bundler, PinholeCameraCal3_S2, + PinholeCameraCal3Bundler, Point2, Point2Vector, Point3, + Pose3, Pose3Vector, Rot3, TriangulationParameters, + TriangulationResult) from gtsam.utils.test_case import GtsamTestCase UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2) @@ -218,6 +209,68 @@ class TestTriangulationExample(GtsamTestCase): # using the Huber loss we now have a quite small error!! nice! self.assertTrue(np.allclose(landmark, actual4, atol=0.05)) + def test_outliers_and_far_landmarks(self) -> None: + """Check safe triangulation function.""" + pose1, pose2 = self.poses + + K1 = Cal3_S2(1500, 1200, 0, 640, 480) + # create first camera. Looking along X-axis, 1 meter above ground plane (x-y) + camera1 = PinholeCameraCal3_S2(pose1, K1) + + # create second camera 1 meter to the right of first camera + K2 = Cal3_S2(1600, 1300, 0, 650, 440) + camera2 = PinholeCameraCal3_S2(pose2, K2) + + # 1. 
Project two landmarks into two cameras and triangulate + z1 = camera1.project(self.landmark) + z2 = camera2.project(self.landmark) + + cameras = CameraSetCal3_S2() + measurements = Point2Vector() + + cameras.append(camera1) + cameras.append(camera2) + measurements.append(z1) + measurements.append(z2) + + landmarkDistanceThreshold = 10 # landmark is closer than that + # all default except landmarkDistanceThreshold: + params = TriangulationParameters(1.0, False, landmarkDistanceThreshold) + actual: TriangulationResult = gtsam.triangulateSafe( + cameras, measurements, params) + self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2) + self.assertTrue(actual.valid()) + + landmarkDistanceThreshold = 4 # landmark is farther than that + params2 = TriangulationParameters( + 1.0, False, landmarkDistanceThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params2) + self.assertTrue(actual.farPoint()) + + # 3. Add a slightly rotated third camera above with a wrong measurement + # (OUTLIER) + pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)) + K3 = Cal3_S2(700, 500, 0, 640, 480) + camera3 = PinholeCameraCal3_S2(pose3, K3) + z3 = camera3.project(self.landmark) + + cameras.append(camera3) + measurements.append(z3 + Point2(10, -10)) + + landmarkDistanceThreshold = 10 # landmark is closer than that + outlierThreshold = 100 # loose, the outlier is going to pass + params3 = TriangulationParameters(1.0, False, landmarkDistanceThreshold, + outlierThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params3) + self.assertTrue(actual.valid()) + + # now set stricter threshold for outlier rejection + outlierThreshold = 5 # tighter, the outlier is not going to pass + params4 = TriangulationParameters(1.0, False, landmarkDistanceThreshold, + outlierThreshold) + actual = gtsam.triangulateSafe(cameras, measurements, params4) + self.assertTrue(actual.outlier()) + if __name__ == "__main__": unittest.main() diff --git 
a/python/gtsam/tests/test_logging_optimizer.py b/python/gtsam/tests/test_logging_optimizer.py index 47eb32e7b..602aeffc9 100644 --- a/python/gtsam/tests/test_logging_optimizer.py +++ b/python/gtsam/tests/test_logging_optimizer.py @@ -18,7 +18,7 @@ import numpy as np from gtsam import Rot3 from gtsam.utils.test_case import GtsamTestCase -from gtsam.utils.logging_optimizer import gtsam_optimize +from gtsam.utils.logging_optimizer import gtsam_optimize, optimize_using KEY = 0 MODEL = gtsam.noiseModel.Unit.Create(3) @@ -34,19 +34,20 @@ class TestOptimizeComet(GtsamTestCase): rotations = {R, R.inverse()} # mean is the identity self.expected = Rot3() - graph = gtsam.NonlinearFactorGraph() - for R in rotations: - graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL)) - initial = gtsam.Values() - initial.insert(KEY, R) - self.params = gtsam.GaussNewtonParams() - self.optimizer = gtsam.GaussNewtonOptimizer( - graph, initial, self.params) + def check(actual): + # Check that optimizing yields the identity + self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + # Check that logging output prints out 3 lines (exact intermediate values differ by OS) + self.assertEqual(self.capturedOutput.getvalue().count('\n'), 3) + # reset stdout catcher + self.capturedOutput.truncate(0) + self.check = check - self.lmparams = gtsam.LevenbergMarquardtParams() - self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer( - graph, initial, self.lmparams - ) + self.graph = gtsam.NonlinearFactorGraph() + for R in rotations: + self.graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL)) + self.initial = gtsam.Values() + self.initial.insert(KEY, R) # setup output capture self.capturedOutput = StringIO() @@ -63,22 +64,29 @@ class TestOptimizeComet(GtsamTestCase): def hook(_, error): print(error) - # Only thing we require from optimizer is an iterate method - gtsam_optimize(self.optimizer, self.params, hook) - - # Check that optimizing yields the identity. 
- actual = self.optimizer.values() - self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + # Wrapper function sets the hook and calls optimizer.optimize() for us. + params = gtsam.GaussNewtonParams() + actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial) + self.check(actual) + actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial, params) + self.check(actual) + actual = gtsam_optimize(gtsam.GaussNewtonOptimizer(self.graph, self.initial, params), + params, hook) + self.check(actual) def test_lm_simple_printing(self): """Make sure we are properly terminating LM""" def hook(_, error): print(error) - gtsam_optimize(self.lmoptimizer, self.lmparams, hook) - - actual = self.lmoptimizer.values() - self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + params = gtsam.LevenbergMarquardtParams() + actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial) + self.check(actual) + actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial, + params) + self.check(actual) + actual = gtsam_optimize(gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial, params), + params, hook) @unittest.skip("Not a test we want run every time, as needs comet.ml account") def test_comet(self): diff --git a/python/gtsam/tests/test_sam.py b/python/gtsam/tests/test_sam.py index e01b9c1d1..fd8da5308 100644 --- a/python/gtsam/tests/test_sam.py +++ b/python/gtsam/tests/test_sam.py @@ -50,6 +50,34 @@ class TestSam(GtsamTestCase): self.assertEqual(range_measurement, measurement.range()) self.gtsamAssertEquals(bearing_measurement, measurement.bearing()) + def test_BearingRangeFactor3D(self): + """ + Test that `measured` works as expected for BearingRangeFactor3D. 
+ """ + bearing_measurement = gtsam.Unit3() + range_measurement = 10.0 + factor = gtsam.BearingRangeFactor3D( + 1, 2, bearing_measurement, range_measurement, + gtsam.noiseModel.Isotropic.Sigma(3, 1)) + measurement = factor.measured() + + self.assertEqual(range_measurement, measurement.range()) + self.gtsamAssertEquals(bearing_measurement, measurement.bearing()) + + def test_BearingRangeFactorPose3(self): + """ + Test that `measured` works as expected for BearingRangeFactorPose3. + """ + range_measurement = 10.0 + bearing_measurement = gtsam.Unit3() + factor = gtsam.BearingRangeFactorPose3( + 1, 2, bearing_measurement, range_measurement, + gtsam.noiseModel.Isotropic.Sigma(3, 1)) + measurement = factor.measured() + + self.assertEqual(range_measurement, measurement.range()) + self.gtsamAssertEquals(bearing_measurement, measurement.bearing()) + if __name__ == "__main__": unittest.main() diff --git a/python/gtsam/utils/logging_optimizer.py b/python/gtsam/utils/logging_optimizer.py index 3d9175951..fe2f717d8 100644 --- a/python/gtsam/utils/logging_optimizer.py +++ b/python/gtsam/utils/logging_optimizer.py @@ -6,6 +6,53 @@ Author: Jing Wu and Frank Dellaert from gtsam import NonlinearOptimizer, NonlinearOptimizerParams import gtsam +from typing import Any, Callable + +OPTIMIZER_PARAMS_MAP = { + gtsam.GaussNewtonOptimizer: gtsam.GaussNewtonParams, + gtsam.LevenbergMarquardtOptimizer: gtsam.LevenbergMarquardtParams, + gtsam.DoglegOptimizer: gtsam.DoglegParams, + gtsam.GncGaussNewtonOptimizer: gtsam.GaussNewtonParams, + gtsam.GncLMOptimizer: gtsam.LevenbergMarquardtParams +} + + +def optimize_using(OptimizerClass, hook, *args) -> gtsam.Values: + """ Wraps the constructor and "optimize()" call for an Optimizer together and adds an iteration + hook. 
+ Example usage: + ```python + def hook(optimizer, error): + print("iteration {:}, error = {:}".format(optimizer.iterations(), error)) + solution = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, init, params) + ``` + Iteration hook's args are (optimizer, error) and return type should be None + + Args: + OptimizerClass (T): A NonlinearOptimizer class (e.g. GaussNewtonOptimizer, + LevenbergMarquardtOptimizer) + hook ([T, double] -> None): Function to callback after each iteration. Args are (optimizer, + error) and return should be None. + *args: Arguments that would be passed into the OptimizerClass constructor, usually: + graph, init, [params] + Returns: + (gtsam.Values): A Values object representing the optimization solution. + """ + # Add the iteration hook to the NonlinearOptimizerParams + for arg in args: + if isinstance(arg, gtsam.NonlinearOptimizerParams): + arg.iterationHook = lambda iteration, error_before, error_after: hook( + optimizer, error_after) + break + else: + params = OPTIMIZER_PARAMS_MAP[OptimizerClass]() + params.iterationHook = lambda iteration, error_before, error_after: hook( + optimizer, error_after) + args = (*args, params) + # Construct Optimizer and optimize + optimizer = OptimizerClass(*args) + hook(optimizer, optimizer.error()) # Call hook once with init values to match behavior below + return optimizer.optimize() def optimize(optimizer, check_convergence, hook): @@ -21,7 +68,8 @@ def optimize(optimizer, check_convergence, hook): current_error = optimizer.error() hook(optimizer, current_error) - # Iterative loop + # Iterative loop. Cannot use `params.iterationHook` because we don't have access to params + # (backwards compatibility issue). while True: # Do next iteration optimizer.iterate() @@ -36,6 +84,7 @@ def gtsam_optimize(optimizer, params, hook): """ Given an optimizer and params, iterate until convergence. + Recommend using optimize_using instead. After each iteration, hook(optimizer) is called. 
After the function, use values and errors to get the result. Arguments: diff --git a/python/gtsam/utils/plot.py b/python/gtsam/utils/plot.py index 5ff7fd7aa..880c436e8 100644 --- a/python/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -12,13 +12,26 @@ from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import import gtsam from gtsam import Marginals, Point2, Point3, Pose2, Pose3, Values -# For future reference: following -# https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/ -# we have, in 2D: -# def kk(p): return math.sqrt(-2*math.log(1-p)) # k to get p probability mass -# def pp(k): return 1-math.exp(-float(k**2)/2.0) # p as a function of k -# Some values: -# k = 5 => p = 99.9996 % + +# For translation between a scaling of the uncertainty ellipse and the +# percentage of inliers see discussion in +# [PR 1067](https://github.com/borglab/gtsam/pull/1067) +# and the notebook python/gtsam/notebooks/ellipses.ipynb (needs scipy). +# +# In the following, the default scaling is chosen for 95% inliers, which +# translates to the following sigma values: +# 1D: 1.959963984540 +# 2D: 2.447746830681 +# 3D: 2.795483482915 +# +# Further references are Stochastic Models, Estimation, and Control Vol 1 by Maybeck, +# page 366 and https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/ +# +# For reference, here are the inlier percentages for some sigma values: +# 1 2 3 4 5 +# 1D 68.26895 95.44997 99.73002 99.99367 99.99994 +# 2D 39.34693 86.46647 98.88910 99.96645 99.99963 +# 3D 19.87480 73.85359 97.07091 99.88660 99.99846 def set_axes_equal(fignum: int) -> None: """ @@ -81,9 +94,8 @@ def plot_covariance_ellipse_3d(axes, """ Plots a Gaussian as an uncertainty ellipse - Based on Maybeck Vol 1, page 366 - k=2.296 corresponds to 1 std, 68.26% of all probability - k=11.82 corresponds to 3 std, 99.74% of all probability + The ellipse is scaled in such a way that 95% of drawn samples are inliers. 
+ Derivation of the scaling factor is explained at the beginning of this file. Args: axes (matplotlib.axes.Axes): Matplotlib axes. @@ -94,7 +106,8 @@ def plot_covariance_ellipse_3d(axes, n: Defines the granularity of the ellipse. Higher values indicate finer ellipses. alpha: Transparency value for the plotted surface in the range [0, 1]. """ - k = 11.82 + # this corresponds to 95%, see note above + k = 2.795483482915 U, S, _ = np.linalg.svd(P) radii = k * np.sqrt(S) @@ -115,12 +128,48 @@ def plot_covariance_ellipse_3d(axes, axes.plot_surface(x, y, z, alpha=alpha, cmap='hot') +def plot_covariance_ellipse_2d(axes, + origin: Point2, + covariance: np.ndarray) -> None: + """ + Plots a Gaussian as an uncertainty ellipse + + The ellipse is scaled in such a way that 95% of drawn samples are inliers. + Derivation of the scaling factor is explained at the beginning of this file. + + Args: + axes (matplotlib.axes.Axes): Matplotlib axes. + origin: The origin in the world frame. + covariance: The marginal covariance matrix of the 2D point + which will be represented as an ellipse. + """ + + w, v = np.linalg.eigh(covariance) + + # this corresponds to 95%, see note above + k = 2.447746830681 + + angle = np.arctan2(v[1, 0], v[0, 0]) + # We multiply k by 2 since k corresponds to the radius but Ellipse uses + # the diameter. + e1 = patches.Ellipse(origin, + np.sqrt(w[0]) * 2 * k, + np.sqrt(w[1]) * 2 * k, + np.rad2deg(angle), + fill=False) + axes.add_patch(e1) + + def plot_point2_on_axes(axes, point: Point2, linespec: str, P: Optional[np.ndarray] = None) -> None: """ - Plot a 2D point on given axis `axes` with given `linespec`. + Plot a 2D point and its corresponding uncertainty ellipse on given axis + `axes` with given `linespec`. + + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`. Args: axes (matplotlib.axes.Axes): Matplotlib axes. 
@@ -130,19 +179,7 @@ def plot_point2_on_axes(axes, """ axes.plot([point[0]], [point[1]], linespec, marker='.', markersize=10) if P is not None: - w, v = np.linalg.eig(P) - - # 5 sigma corresponds to 99.9996%, see note above - k = 5.0 - - angle = np.arctan2(v[1, 0], v[0, 0]) - e1 = patches.Ellipse(point, - np.sqrt(w[0] * k), - np.sqrt(w[1] * k), - np.rad2deg(angle), - fill=False) - axes.add_patch(e1) - + plot_covariance_ellipse_2d(axes, point, P) def plot_point2( fignum: int, @@ -154,6 +191,9 @@ def plot_point2( """ Plot a 2D point on given figure with given `linespec`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`. + Args: fignum: Integer representing the figure number to use for plotting. point: The point to be plotted. @@ -182,6 +222,9 @@ def plot_pose2_on_axes(axes, """ Plot a 2D pose on given axis `axes` with given `axis_length`. + The ellipse is scaled in such a way that 95% of drawn samples are inliers, + see `plot_covariance_ellipse_2d`. + Args: axes (matplotlib.axes.Axes): Matplotlib axes. pose: The pose to be plotted. @@ -206,19 +249,7 @@ def plot_pose2_on_axes(axes, if covariance is not None: pPp = covariance[0:2, 0:2] gPp = np.matmul(np.matmul(gRp, pPp), gRp.T) - - w, v = np.linalg.eig(gPp) - - # 5 sigma corresponds to 99.9996%, see note above - k = 5.0 - - angle = np.arctan2(v[1, 0], v[0, 0]) - e1 = patches.Ellipse(origin, - np.sqrt(w[0] * k), - np.sqrt(w[1] * k), - np.rad2deg(angle), - fill=False) - axes.add_patch(e1) + plot_covariance_ellipse_2d(axes, origin, gPp) def plot_pose2( @@ -231,6 +262,9 @@ def plot_pose2( """ Plot a 2D pose on given figure with given `axis_length`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`. + Args: fignum: Integer representing the figure number to use for plotting. pose: The pose to be plotted. 
@@ -260,6 +294,9 @@ def plot_point3_on_axes(axes, """ Plot a 3D point on given axis `axes` with given `linespec`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`. + Args: axes (matplotlib.axes.Axes): Matplotlib axes. point: The point to be plotted. @@ -281,6 +318,9 @@ def plot_point3( """ Plot a 3D point on given figure with given `linespec`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`. + Args: fignum: Integer representing the figure number to use for plotting. point: The point to be plotted. @@ -293,7 +333,10 @@ def plot_point3( """ fig = plt.figure(fignum) - axes = fig.gca(projection='3d') + if not fig.axes: + axes = fig.add_subplot(projection='3d') + else: + axes = fig.axes[0] plot_point3_on_axes(axes, point, linespec, P) axes.set_xlabel(axis_labels[0]) @@ -348,13 +391,16 @@ def plot_3d_points(fignum, fig = plt.figure(fignum) fig.suptitle(title) - fig.canvas.set_window_title(title.lower()) + fig.canvas.manager.set_window_title(title.lower()) def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ Plot a 3D pose on given axis `axes` with given `axis_length`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`. + Args: axes (matplotlib.axes.Axes): Matplotlib axes. point (gtsam.Point3): The point to be plotted. @@ -397,6 +443,9 @@ def plot_pose3( """ Plot a 3D pose on given figure with given `axis_length`. + The uncertainty ellipse (if covariance is given) is scaled in such a way + that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`. + Args: fignum: Integer representing the figure number to use for plotting. pose (gtsam.Pose3): 3D pose to be plotted. 
@@ -444,7 +493,10 @@ def plot_trajectory( axis_labels (iterable[string]): List of axis labels to set. """ fig = plt.figure(fignum) - axes = fig.gca(projection='3d') + if not fig.axes: + axes = fig.add_subplot(projection='3d') + else: + axes = fig.axes[0] axes.set_xlabel(axis_labels[0]) axes.set_ylabel(axis_labels[1]) @@ -476,7 +528,7 @@ def plot_trajectory( plot_pose3_on_axes(axes, pose, P=covariance, axis_length=scale) fig.suptitle(title) - fig.canvas.set_window_title(title.lower()) + fig.canvas.manager.set_window_title(title.lower()) def plot_incremental_trajectory(fignum: int, @@ -499,7 +551,10 @@ def plot_incremental_trajectory(fignum: int, Used to create animation effect. """ fig = plt.figure(fignum) - axes = fig.gca(projection='3d') + if not fig.axes: + axes = fig.add_subplot(projection='3d') + else: + axes = fig.axes[0] poses = gtsam.utilities.allPose3s(values) keys = gtsam.KeyVector(poses.keys()) diff --git a/tests/testNonlinearFactorGraph.cpp b/tests/testNonlinearFactorGraph.cpp index 05a6e7f45..e1a88d616 100644 --- a/tests/testNonlinearFactorGraph.cpp +++ b/tests/testNonlinearFactorGraph.cpp @@ -335,21 +335,21 @@ TEST(NonlinearFactorGraph, dot) { "graph {\n" " size=\"5,5\";\n" "\n" - " varl1[label=\"l1\"];\n" - " varx1[label=\"x1\"];\n" - " varx2[label=\"x2\"];\n" + " var7782220156096217089[label=\"l1\"];\n" + " var8646911284551352321[label=\"x1\"];\n" + " var8646911284551352322[label=\"x2\"];\n" "\n" " factor0[label=\"\", shape=point];\n" - " varx1--factor0;\n" + " var8646911284551352321--factor0;\n" " factor1[label=\"\", shape=point];\n" - " varx1--factor1;\n" - " varx2--factor1;\n" + " var8646911284551352321--factor1;\n" + " var8646911284551352322--factor1;\n" " factor2[label=\"\", shape=point];\n" - " varx1--factor2;\n" - " varl1--factor2;\n" + " var8646911284551352321--factor2;\n" + " var7782220156096217089--factor2;\n" " factor3[label=\"\", shape=point];\n" - " varx2--factor3;\n" - " varl1--factor3;\n" + " var8646911284551352322--factor3;\n" + " 
var7782220156096217089--factor3;\n" "}\n"; const NonlinearFactorGraph fg = createNonlinearFactorGraph(); @@ -363,21 +363,21 @@ TEST(NonlinearFactorGraph, dot_extra) { "graph {\n" " size=\"5,5\";\n" "\n" - " varl1[label=\"l1\", pos=\"0,0!\"];\n" - " varx1[label=\"x1\", pos=\"1,0!\"];\n" - " varx2[label=\"x2\", pos=\"1,1.5!\"];\n" + " var7782220156096217089[label=\"l1\", pos=\"0,0!\"];\n" + " var8646911284551352321[label=\"x1\", pos=\"1,0!\"];\n" + " var8646911284551352322[label=\"x2\", pos=\"1,1.5!\"];\n" "\n" " factor0[label=\"\", shape=point];\n" - " varx1--factor0;\n" + " var8646911284551352321--factor0;\n" " factor1[label=\"\", shape=point];\n" - " varx1--factor1;\n" - " varx2--factor1;\n" + " var8646911284551352321--factor1;\n" + " var8646911284551352322--factor1;\n" " factor2[label=\"\", shape=point];\n" - " varx1--factor2;\n" - " varl1--factor2;\n" + " var8646911284551352321--factor2;\n" + " var7782220156096217089--factor2;\n" " factor3[label=\"\", shape=point];\n" - " varx2--factor3;\n" - " varl1--factor3;\n" + " var8646911284551352322--factor3;\n" + " var7782220156096217089--factor3;\n" "}\n"; const NonlinearFactorGraph fg = createNonlinearFactorGraph(); diff --git a/tests/testRobust.cpp b/tests/testRobust.cpp new file mode 100644 index 000000000..ad361f6d9 --- /dev/null +++ b/tests/testRobust.cpp @@ -0,0 +1,53 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testRobust.cpp + * @brief Unit tests for Robust loss functions + * @author Fan Jiang + * @author Yetong Zhang + * @date Apr 7, 2022 + **/ + +#include +#include +#include +#include +#include +#include + +using namespace gtsam; + +TEST(RobustNoise, loss) { + // Keys. + gtsam::Key x1_key = 1; + gtsam::Key x2_key = 2; + + auto gm = noiseModel::mEstimator::GemanMcClure::Create(1.0); + auto noise = noiseModel::Robust::Create(gm, noiseModel::Unit::Create(1)); + + auto factor = PriorFactor(x1_key, 0.0, noise); + auto between_factor = BetweenFactor(x1_key, x2_key, 0.0, noise); + + Values values; + values.insert(x1_key, 10.0); + values.insert(x2_key, 0.0); + + EXPECT_DOUBLES_EQUAL(0.49505, factor.error(values), 1e-5); + EXPECT_DOUBLES_EQUAL(0.49505, between_factor.error(values), 1e-5); + EXPECT_DOUBLES_EQUAL(0.49505, gm->loss(10.0), 1e-5); +} + +int main() { + TestResult tr; + + return TestRegistry::runAllTests(tr); +} diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp index 833f11355..15f1caa1b 100644 --- a/tests/testTranslationRecovery.cpp +++ b/tests/testTranslationRecovery.cpp @@ -17,8 +17,8 @@ */ #include -#include #include +#include #include using namespace std; @@ -62,13 +62,13 @@ TEST(TranslationRecovery, BAL) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); + TranslationRecovery algorithm; + const auto graph = algorithm.buildGraph(relativeTranslations); EXPECT_LONGS_EQUAL(3, graph.size()); // Run translation recovery const double scale = 2.0; - const auto result = algorithm.run(scale); + const auto result = algorithm.run(relativeTranslations, scale); // Check result for first two translations, determined by prior EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); @@ 
-107,12 +107,12 @@ TEST(TranslationRecovery, TwoPoseTest) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); + TranslationRecovery algorithm; + const auto graph = algorithm.buildGraph(relativeTranslations); EXPECT_LONGS_EQUAL(1, graph.size()); // Run translation recovery - const auto result = algorithm.run(/*scale=*/3.0); + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); // Check result for first two translations, determined by prior EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); @@ -145,11 +145,11 @@ TEST(TranslationRecovery, ThreePoseTest) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); + TranslationRecovery algorithm; + const auto graph = algorithm.buildGraph(relativeTranslations); EXPECT_LONGS_EQUAL(3, graph.size()); - const auto result = algorithm.run(/*scale=*/3.0); + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); // Check result EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); @@ -180,13 +180,9 @@ TEST(TranslationRecovery, ThreePosesIncludingZeroTranslation) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); - // There is only 1 non-zero translation edge. 
- EXPECT_LONGS_EQUAL(1, graph.size()); - + TranslationRecovery algorithm; // Run translation recovery - const auto result = algorithm.run(/*scale=*/3.0); + const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0); // Check result EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); @@ -222,12 +218,10 @@ TEST(TranslationRecovery, FourPosesIncludingZeroTranslation) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); - EXPECT_LONGS_EQUAL(3, graph.size()); + TranslationRecovery algorithm; // Run translation recovery - const auto result = algorithm.run(/*scale=*/4.0); + const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0); // Check result EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); @@ -251,13 +245,10 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) { unitTranslation.measured())); } - TranslationRecovery algorithm(relativeTranslations); - const auto graph = algorithm.buildGraph(); - // Graph size will be zero as there no 'non-zero distance' edges. - EXPECT_LONGS_EQUAL(0, graph.size()); + TranslationRecovery algorithm; // Run translation recovery - const auto result = algorithm.run(/*scale=*/4.0); + const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0); // Check result EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-8)); @@ -265,6 +256,103 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) { EXPECT(assert_equal(Point3(0, 0, 0), result.at(2), 1e-8)); } +TEST(TranslationRecovery, ThreePosesWithOneSoftConstraint) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. 
+ + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 3, Point3(1, -1, 0), + noiseModel::Isotropic::Sigma(3, 1e-2)); + + TranslationRecovery algorithm; + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); +} + +TEST(TranslationRecovery, ThreePosesWithOneHardConstraint) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. 
+ + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0), + noiseModel::Constrained::All(3, 1e2)); + + TranslationRecovery algorithm; + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); +} + +TEST(TranslationRecovery, NodeWithBetweenFactorAndNoMeasurements) { + // Checks that valid results are obtained when a between translation edge is + // provided with a node that does not have any other relative translations. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + poses.insert(4, Pose3(Rot3(), Point3(1, 2, 1))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 3}, {1, 3}}); + + std::vector> betweenTranslations; + betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0), + noiseModel::Constrained::All(3, 1e2)); + // Node 4 only has this between translation prior, no relative translations. 
+ betweenTranslations.emplace_back(0, 4, Point3(1, 2, 1)); + + TranslationRecovery algorithm; + auto result = + algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0), 1e-4)); + EXPECT(assert_equal(Point3(2, 0, 0), result.at(1), 1e-4)); + EXPECT(assert_equal(Point3(1, -1, 0), result.at(3), 1e-4)); + EXPECT(assert_equal(Point3(1, 2, 1), result.at(4), 1e-4)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/wrap/cmake/MatlabWrap.cmake b/wrap/cmake/MatlabWrap.cmake index 3cb058102..eaffcc059 100644 --- a/wrap/cmake/MatlabWrap.cmake +++ b/wrap/cmake/MatlabWrap.cmake @@ -242,6 +242,13 @@ function(wrap_library_internal interfaceHeader moduleName linkLibraries extraInc find_package(PythonInterp ${WRAP_PYTHON_VERSION} EXACT) find_package(PythonLibs ${WRAP_PYTHON_VERSION} EXACT) + # Set the path separator for PYTHONPATH + if(UNIX) + set(GTWRAP_PATH_SEPARATOR ":") + else() + set(GTWRAP_PATH_SEPARATOR ";") + endif() + add_custom_command( OUTPUT ${generated_cpp_file} DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets}