diff --git a/.gitignore b/.gitignore index cde059767..e6e38132f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ .idea *.pyc *.DS_Store +*.swp /examples/Data/dubrovnik-3-7-pre-rewritten.txt /examples/Data/pose2example-rewritten.txt /examples/Data/pose3example-rewritten.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index d2559705d..b8480867e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -105,6 +105,11 @@ endif() GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) +if (GTSAM_BUILD_UNSTABLE) + GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") + export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake) +endif() + # Check for doxygen availability - optional dependency find_package(Doxygen) diff --git a/cmake/GtsamMakeConfigFile.cmake b/cmake/GtsamMakeConfigFile.cmake index 0479a2524..91cb98a8c 100644 --- a/cmake/GtsamMakeConfigFile.cmake +++ b/cmake/GtsamMakeConfigFile.cmake @@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME) # here. if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING) set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING}) + elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING) + set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING}) endif() # Version file diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake index ee86066a2..64c239f39 100644 --- a/cmake/HandleGeneralOptions.cmake +++ b/cmake/HandleGeneralOptions.cmake @@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE) option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) endif() -option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) -option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) -option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) -option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." 
ON) -option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) -option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) -option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) -option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) -option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) -option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) -option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) -option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) -option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) +option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) +option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) +option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) +option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) +option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) +option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) +option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) +option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) +option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) +option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) +option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) +option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) +option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) +option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) +option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF) if(NOT MSVC AND NOT XCODE_VERSION) option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) endif() diff --git a/cmake/HandleTBB.cmake b/cmake/HandleTBB.cmake index 118dc4dac..52ee75494 100644 --- a/cmake/HandleTBB.cmake +++ b/cmake/HandleTBB.cmake @@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB) if(TBB_FOUND) set(GTSAM_USE_TBB 1) # This will go into config.h - if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1")) - message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.") - endif() +# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1")) +# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. 
Use an older version instead.") +# endif() if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) set(TBB_GREATER_EQUAL_2020 1) diff --git a/doc/math.lyx b/doc/math.lyx index 2533822a7..4ee89a9cc 100644 --- a/doc/math.lyx +++ b/doc/math.lyx @@ -5082,6 +5082,394 @@ reference "ex:projection" \end_inset +\end_layout + +\begin_layout Subsection +Derivative of Adjoint +\begin_inset CommandInset label +LatexCommand label +name "subsec:pose3_adjoint_deriv" + +\end_inset + + +\end_layout + +\begin_layout Standard +Consider +\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$ +\end_inset + + is defined as +\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$ +\end_inset + +. + The derivative is notated (see Section +\begin_inset CommandInset ref +LatexCommand ref +reference "sec:Derivatives-of-Actions" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +): +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b}) +\] + +\end_inset + +First, computing +\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\xi_{b})$ +\end_inset + + is easy, as its matrix is simply +\begin_inset Formula $Ad_{T}$ +\end_inset + +: +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b}) +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T} +\] + +\end_inset + +We will derive +\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$ +\end_inset + + using two approaches. + In the first, we'll define +\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$ +\end_inset + +. + From Section +\begin_inset CommandInset ref +LatexCommand ref +reference "sec:Derivatives-of-Actions" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +, +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{align*} +D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\ +D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1} +\end{align*} + +\end_inset + +Now we can use the definition of the Adjoint representation +\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$ +\end_inset + + (aka conjugation by +\begin_inset Formula $g$ +\end_inset + +) then apply product rule and simplify: +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{align*} +D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\ + & =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\ + & =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\ + & =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\ + & =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\ + & =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\ +D_{1}F_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}}) +\end{align*} + +\end_inset + +Where +\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$ +\end_inset + + is the adjoint map of the lie algebra. 
+\end_layout + +\begin_layout Standard +The second, perhaps more intuitive way of deriving +\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi_{b})$ +\end_inset + +, would be to use the fact that the derivative at the origin +\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$ +\end_inset + + by definition of the adjoint +\begin_inset Formula $ad_{\xi}$ +\end_inset + +. + Then applying the property +\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$ +\end_inset + +, +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right) +\] + +\end_inset + + +\end_layout + +\begin_layout Subsection +Derivative of AdjointTranspose +\end_layout + +\begin_layout Standard +The transpose of the Adjoint, +\family roman +\series medium +\shape up +\size normal +\emph off +\bar no +\strikeout off +\xout off +\uuline off +\uwave off +\noun off +\color none + +\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g^{*}\rightarrow g^{*}}$ +\end_inset + +, is useful as a way to change the reference frame of vectors in the dual + space +\family default +\series default +\shape default +\size default +\emph default +\bar default +\strikeout default +\xout default +\uuline default +\uwave default +\noun default +\color inherit +(note the +\begin_inset Formula $^{*}$ +\end_inset + + denoting that we are now in the dual space) +\family roman +\series medium +\shape up +\size normal +\emph off +\bar no +\strikeout off +\xout off +\uuline off +\uwave off +\noun off +\color none +. + To be more concrete, where +\family default +\series default +\shape default +\size default +\emph default +\bar default +\strikeout default +\xout default +\uuline default +\uwave default +\noun default +\color inherit +as +\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$ +\end_inset + + converts the +\emph on +twist +\emph default + +\family roman +\series medium +\shape up +\size normal +\emph off +\bar no +\strikeout off +\xout off +\uuline off +\uwave off +\noun off +\color none + +\begin_inset Formula $\xi_{b}$ +\end_inset + + from the +\begin_inset Formula $T$ +\end_inset + + frame, +\family default +\series default +\shape default +\size default +\emph default +\bar default +\strikeout default +\xout default +\uuline default +\uwave default +\noun default +\color inherit + +\family roman +\series medium +\shape up +\size normal +\emph off +\bar no +\strikeout off +\xout off +\uuline off +\uwave off +\noun off +\color none + +\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$ +\end_inset + + converts the +\family default +\series default +\shape default +\size default +\emph on +\bar default +\strikeout default +\xout default +\uuline default +\uwave default +\noun default +\color inherit +wrench +\emph default + +\family roman +\series medium +\shape up +\size normal +\emph off +\bar no +\strikeout off +\xout off +\uuline off +\uwave off +\noun off +\color none + +\begin_inset Formula $\xi_{b}^{*}$ +\end_inset + + from the +\begin_inset Formula $T$ +\end_inset + + frame +\family default +\series default +\shape default +\size default +\emph default +\bar default +\strikeout default +\xout default +\uuline default +\uwave default +\noun default +\color inherit +. 
+ It's difficult to apply a similar derivation as in Section +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:pose3_adjoint_deriv" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + for the derivative of +\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$ +\end_inset + + because +\begin_inset Formula $Ad_{T}^{T}$ +\end_inset + + cannot be naturally defined as a conjugation, so we resort to crunching + through the algebra. + The details are omitted but the result is a form that vaguely resembles + (but does not exactly match) +\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$ +\end_inset + +: +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{align*} +\begin{bmatrix}\omega_{T}\\ +v_{T} +\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\ +D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\ +\hat{v}_{T} & 0 +\end{bmatrix} +\end{align*} + +\end_inset + + \end_layout \begin_layout Subsection diff --git a/doc/math.pdf b/doc/math.pdf index 8dc7270f1..40980354e 100644 Binary files a/doc/math.pdf and b/doc/math.pdf differ diff --git a/examples/DiscreteBayesNetExample.cpp b/examples/DiscreteBayesNetExample.cpp index 5dca116c3..febc1e128 100644 --- a/examples/DiscreteBayesNetExample.cpp +++ b/examples/DiscreteBayesNetExample.cpp @@ -56,8 +56,8 @@ int main(int argc, char **argv) { DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering); // solve - DiscreteFactor::sharedValues mpe = chordal->optimize(); - GTSAM_PRINT(*mpe); + auto mpe = chordal->optimize(); + GTSAM_PRINT(mpe); // We can also build a Bayes tree (directed junction tree). // The elimination order above will do fine: @@ -70,14 +70,14 @@ int main(int argc, char **argv) { // solve again, now with evidence DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering); - DiscreteFactor::sharedValues mpe2 = chordal2->optimize(); - GTSAM_PRINT(*mpe2); + auto mpe2 = chordal2->optimize(); + GTSAM_PRINT(mpe2); // We can also sample from it cout << "\n10 samples:" << endl; for (size_t i = 0; i < 10; i++) { - DiscreteFactor::sharedValues sample = chordal2->sample(); - GTSAM_PRINT(*sample); + auto sample = chordal2->sample(); + GTSAM_PRINT(sample); } return 0; } diff --git a/examples/DiscreteBayesNet_FG.cpp b/examples/DiscreteBayesNet_FG.cpp index 121df4bef..69283a1be 100644 --- a/examples/DiscreteBayesNet_FG.cpp +++ b/examples/DiscreteBayesNet_FG.cpp @@ -33,11 +33,11 @@ using namespace gtsam; int main(int argc, char **argv) { // Define keys and a print function Key C(1), S(2), R(3), W(4); - auto print = [=](DiscreteFactor::sharedValues values) { - cout << boolalpha << "Cloudy = " << static_cast((*values)[C]) - << " Sprinkler = " << static_cast((*values)[S]) - << " Rain = " << boolalpha << static_cast((*values)[R]) - << " WetGrass = " << static_cast((*values)[W]) << endl; + auto print = [=](const DiscreteFactor::Values& values) { + cout << boolalpha << "Cloudy = " << static_cast(values.at(C)) + << " Sprinkler = " << static_cast(values.at(S)) + << " Rain = " << boolalpha << static_cast(values.at(R)) + << " WetGrass = " << static_cast(values.at(W)) << endl; }; // We assume binary state variables @@ -85,7 +85,7 @@ int main(int argc, char **argv) { } // "Most Probable Explanation", i.e., configuration with largest value - DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize(); + auto mpe = graph.eliminateSequential()->optimize(); cout << "\nMost Probable Explanation (MPE):" << endl; print(mpe); @@ -97,7 
+97,7 @@ int main(int argc, char **argv) { // solve again, now with evidence DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); - DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize(); + auto mpe_with_evidence = chordal->optimize(); cout << "\nMPE given C=0:" << endl; print(mpe_with_evidence); @@ -113,7 +113,7 @@ int main(int argc, char **argv) { // We can also sample from it cout << "\n10 samples:" << endl; for (size_t i = 0; i < 10; i++) { - DiscreteFactor::sharedValues sample = chordal->sample(); + auto sample = chordal->sample(); print(sample); } return 0; diff --git a/examples/HMMExample.cpp b/examples/HMMExample.cpp index ee861e381..b46baf4e0 100644 --- a/examples/HMMExample.cpp +++ b/examples/HMMExample.cpp @@ -66,14 +66,14 @@ int main(int argc, char **argv) { chordal->print("Eliminated"); // solve - DiscreteFactor::sharedValues mpe = chordal->optimize(); - GTSAM_PRINT(*mpe); + auto mpe = chordal->optimize(); + GTSAM_PRINT(mpe); // We can also sample from it cout << "\n10 samples:" << endl; for (size_t k = 0; k < 10; k++) { - DiscreteFactor::sharedValues sample = chordal->sample(); - GTSAM_PRINT(*sample); + auto sample = chordal->sample(); + GTSAM_PRINT(sample); } // Or compute the marginals. This re-eliminates the FG into a Bayes tree diff --git a/examples/IMUKittiExampleGPS.cpp b/examples/IMUKittiExampleGPS.cpp index e2ca49647..cb60b2516 100644 --- a/examples/IMUKittiExampleGPS.cpp +++ b/examples/IMUKittiExampleGPS.cpp @@ -11,21 +11,23 @@ /** * @file IMUKittiExampleGPS - * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE - * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics + * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI + * VISION BENCHMARK SUITE + * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ + * Electronics */ // GTSAM related includes. 
+#include #include #include #include -#include -#include -#include #include #include #include -#include +#include +#include +#include #include #include @@ -34,35 +36,35 @@ using namespace std; using namespace gtsam; -using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) -using symbol_shorthand::V; // Vel (xdot,ydot,zdot) using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) +using symbol_shorthand::V; // Vel (xdot,ydot,zdot) +using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) struct KittiCalibration { - double body_ptx; - double body_pty; - double body_ptz; - double body_prx; - double body_pry; - double body_prz; - double accelerometer_sigma; - double gyroscope_sigma; - double integration_sigma; - double accelerometer_bias_sigma; - double gyroscope_bias_sigma; - double average_delta_t; + double body_ptx; + double body_pty; + double body_ptz; + double body_prx; + double body_pry; + double body_prz; + double accelerometer_sigma; + double gyroscope_sigma; + double integration_sigma; + double accelerometer_bias_sigma; + double gyroscope_bias_sigma; + double average_delta_t; }; struct ImuMeasurement { - double time; - double dt; - Vector3 accelerometer; - Vector3 gyroscope; // omega + double time; + double dt; + Vector3 accelerometer; + Vector3 gyroscope; // omega }; struct GpsMeasurement { - double time; - Vector3 position; // x,y,z + double time; + Vector3 position; // x,y,z }; const string output_filename = "IMUKittiExampleGPSResults.csv"; @@ -70,290 +72,313 @@ const string output_filename = "IMUKittiExampleGPSResults.csv"; void loadKittiData(KittiCalibration& kitti_calibration, vector& imu_measurements, vector& gps_measurements) { - string line; + string line; - // Read IMU metadata and compute relative sensor pose transforms - // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma - // AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT - string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt"); - ifstream imu_metadata(imu_metadata_file.c_str()); + // Read IMU metadata and compute relative sensor pose transforms + // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma + // GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma + // AverageDeltaT + string imu_metadata_file = + findExampleDataFile("KittiEquivBiasedImu_metadata.txt"); + ifstream imu_metadata(imu_metadata_file.c_str()); - printf("-- Reading sensor metadata\n"); + printf("-- Reading sensor metadata\n"); - getline(imu_metadata, line, '\n'); // ignore the first line + getline(imu_metadata, line, '\n'); // ignore the first line - // Load Kitti calibration - getline(imu_metadata, line, '\n'); - sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", - &kitti_calibration.body_ptx, - &kitti_calibration.body_pty, - &kitti_calibration.body_ptz, - &kitti_calibration.body_prx, - &kitti_calibration.body_pry, - &kitti_calibration.body_prz, - &kitti_calibration.accelerometer_sigma, - &kitti_calibration.gyroscope_sigma, - &kitti_calibration.integration_sigma, - &kitti_calibration.accelerometer_bias_sigma, - &kitti_calibration.gyroscope_bias_sigma, - &kitti_calibration.average_delta_t); - printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n", - kitti_calibration.body_ptx, - kitti_calibration.body_pty, - kitti_calibration.body_ptz, - kitti_calibration.body_prx, - kitti_calibration.body_pry, - kitti_calibration.body_prz, - kitti_calibration.accelerometer_sigma, - kitti_calibration.gyroscope_sigma, - 
kitti_calibration.integration_sigma, - kitti_calibration.accelerometer_bias_sigma, - kitti_calibration.gyroscope_bias_sigma, - kitti_calibration.average_delta_t); + // Load Kitti calibration + getline(imu_metadata, line, '\n'); + sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", + &kitti_calibration.body_ptx, &kitti_calibration.body_pty, + &kitti_calibration.body_ptz, &kitti_calibration.body_prx, + &kitti_calibration.body_pry, &kitti_calibration.body_prz, + &kitti_calibration.accelerometer_sigma, + &kitti_calibration.gyroscope_sigma, + &kitti_calibration.integration_sigma, + &kitti_calibration.accelerometer_bias_sigma, + &kitti_calibration.gyroscope_bias_sigma, + &kitti_calibration.average_delta_t); + printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n", + kitti_calibration.body_ptx, kitti_calibration.body_pty, + kitti_calibration.body_ptz, kitti_calibration.body_prx, + kitti_calibration.body_pry, kitti_calibration.body_prz, + kitti_calibration.accelerometer_sigma, + kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma, + kitti_calibration.accelerometer_bias_sigma, + kitti_calibration.gyroscope_bias_sigma, + kitti_calibration.average_delta_t); - // Read IMU data - // Time dt accelX accelY accelZ omegaX omegaY omegaZ - string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt"); - printf("-- Reading IMU measurements from file\n"); - { - ifstream imu_data(imu_data_file.c_str()); - getline(imu_data, line, '\n'); // ignore the first line + // Read IMU data + // Time dt accelX accelY accelZ omegaX omegaY omegaZ + string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt"); + printf("-- Reading IMU measurements from file\n"); + { + ifstream imu_data(imu_data_file.c_str()); + getline(imu_data, line, '\n'); // ignore the first line - double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0; - while (!imu_data.eof()) { - getline(imu_data, line, '\n'); - sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", - &time, &dt, - &acc_x, &acc_y, &acc_z, - &gyro_x, &gyro_y, &gyro_z); + double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, + gyro_y = 0, gyro_z = 0; + while (!imu_data.eof()) { + getline(imu_data, line, '\n'); + sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt, + &acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z); - ImuMeasurement measurement; - measurement.time = time; - measurement.dt = dt; - measurement.accelerometer = Vector3(acc_x, acc_y, acc_z); - measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z); - imu_measurements.push_back(measurement); - } + ImuMeasurement measurement; + measurement.time = time; + measurement.dt = dt; + measurement.accelerometer = Vector3(acc_x, acc_y, acc_z); + measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z); + imu_measurements.push_back(measurement); } + } - // Read GPS data - // Time,X,Y,Z - string gps_data_file = findExampleDataFile("KittiGps_converted.txt"); - printf("-- Reading GPS measurements from file\n"); - { - ifstream gps_data(gps_data_file.c_str()); - getline(gps_data, line, '\n'); // ignore the first line + // Read GPS data + // Time,X,Y,Z + string gps_data_file = findExampleDataFile("KittiGps_converted.txt"); + printf("-- Reading GPS measurements from file\n"); + { + ifstream gps_data(gps_data_file.c_str()); + getline(gps_data, line, '\n'); // ignore the first line - double time = 0, gps_x = 0, gps_y = 0, gps_z = 0; - while (!gps_data.eof()) { - getline(gps_data, line, '\n'); - 
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z); + double time = 0, gps_x = 0, gps_y = 0, gps_z = 0; + while (!gps_data.eof()) { + getline(gps_data, line, '\n'); + sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z); - GpsMeasurement measurement; - measurement.time = time; - measurement.position = Vector3(gps_x, gps_y, gps_z); - gps_measurements.push_back(measurement); - } + GpsMeasurement measurement; + measurement.time = time; + measurement.position = Vector3(gps_x, gps_y, gps_z); + gps_measurements.push_back(measurement); } + } } int main(int argc, char* argv[]) { - KittiCalibration kitti_calibration; - vector imu_measurements; - vector gps_measurements; - loadKittiData(kitti_calibration, imu_measurements, gps_measurements); + KittiCalibration kitti_calibration; + vector imu_measurements; + vector gps_measurements; + loadKittiData(kitti_calibration, imu_measurements, gps_measurements); - Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz, - kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz) - .finished(); - auto body_T_imu = Pose3::Expmap(BodyP); - if (!body_T_imu.equals(Pose3(), 1e-5)) { - printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"); - exit(-1); - } + Vector6 BodyP = + (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, + kitti_calibration.body_ptz, kitti_calibration.body_prx, + kitti_calibration.body_pry, kitti_calibration.body_prz) + .finished(); + auto body_T_imu = Pose3::Expmap(BodyP); + if (!body_T_imu.equals(Pose3(), 1e-5)) { + printf( + "Currently only support IMUinBody is identity, i.e. IMU and body frame " + "are the same"); + exit(-1); + } - // Configure different variables - // double t_offset = gps_measurements[0].time; - size_t first_gps_pose = 1; - size_t gps_skip = 10; // Skip this many GPS measurements each time - double g = 9.8; - auto w_coriolis = Vector3::Zero(); // zero vector + // Configure different variables + // double t_offset = gps_measurements[0].time; + size_t first_gps_pose = 1; + size_t gps_skip = 10; // Skip this many GPS measurements each time + double g = 9.8; + auto w_coriolis = Vector3::Zero(); // zero vector - // Configure noise models - auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), - Vector3::Constant(1.0/0.07)) - .finished()); + // Configure noise models + auto noise_model_gps = noiseModel::Diagonal::Precisions( + (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07)) + .finished()); - // Set initial conditions for the estimated trajectory - // initial pose is the reference frame (navigation frame) - auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position); - // the vehicle is stationary at the beginning at position 0,0,0 - Vector3 current_velocity_global = Vector3::Zero(); - auto current_bias = imuBias::ConstantBias(); // init with zero bias + // Set initial conditions for the estimated trajectory + // initial pose is the reference frame (navigation frame) + auto current_pose_global = + Pose3(Rot3(), gps_measurements[first_gps_pose].position); + // the vehicle is stationary at the beginning at position 0,0,0 + Vector3 current_velocity_global = Vector3::Zero(); + auto current_bias = imuBias::ConstantBias(); // init with zero bias - auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), - Vector3::Constant(1.0)) - .finished()); - 
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0)); - auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100), - Vector3::Constant(5.00e-05)) - .finished()); + auto sigma_init_x = noiseModel::Diagonal::Precisions( + (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished()); + auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0)); + auto sigma_init_b = noiseModel::Diagonal::Sigmas( + (Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05)) + .finished()); - // Set IMU preintegration parameters - Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2); - Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2); - // error committed in integrating position from velocities - Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2); + // Set IMU preintegration parameters + Matrix33 measured_acc_cov = + I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2); + Matrix33 measured_omega_cov = + I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2); + // error committed in integrating position from velocities + Matrix33 integration_error_cov = + I_3x3 * pow(kitti_calibration.integration_sigma, 2); - auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g); - imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous - imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous - imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous - imu_params->omegaCoriolis = w_coriolis; + auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g); + imu_params->accelerometerCovariance = + measured_acc_cov; // acc white noise in continuous + imu_params->integrationCovariance = + integration_error_cov; // integration uncertainty continuous + imu_params->gyroscopeCovariance = + measured_omega_cov; // gyro white noise in continuous + imu_params->omegaCoriolis = w_coriolis; - std::shared_ptr current_summarized_measurement = nullptr; + std::shared_ptr current_summarized_measurement = + nullptr; - // Set ISAM2 parameters and create ISAM2 solver object - ISAM2Params isam_params; - isam_params.factorization = ISAM2Params::CHOLESKY; - isam_params.relinearizeSkip = 10; + // Set ISAM2 parameters and create ISAM2 solver object + ISAM2Params isam_params; + isam_params.factorization = ISAM2Params::CHOLESKY; + isam_params.relinearizeSkip = 10; - ISAM2 isam(isam_params); + ISAM2 isam(isam_params); - // Create the factor graph and values object that will store new factors and values to add to the incremental graph - NonlinearFactorGraph new_factors; - Values new_values; // values storing the initial estimates of new nodes in the factor graph + // Create the factor graph and values object that will store new factors and + // values to add to the incremental graph + NonlinearFactorGraph new_factors; + Values new_values; // values storing the initial estimates of new nodes in + // the factor graph - /// Main loop: - /// (1) we read the measurements - /// (2) we create the corresponding factors in the graph - /// (3) we solve the graph to obtain and optimal estimate of robot trajectory - printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n"); - size_t j = 0; - for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { - // At each non=IMU measurement we initialize 
a new node in the graph - auto current_pose_key = X(i); - auto current_vel_key = V(i); - auto current_bias_key = B(i); - double t = gps_measurements[i].time; + /// Main loop: + /// (1) we read the measurements + /// (2) we create the corresponding factors in the graph + /// (3) we solve the graph to obtain and optimal estimate of robot trajectory + printf( + "-- Starting main loop: inference is performed at each time step, but we " + "plot trajectory every 10 steps\n"); + size_t j = 0; + size_t included_imu_measurement_count = 0; - if (i == first_gps_pose) { - // Create initial estimate and prior on initial pose, velocity, and biases - new_values.insert(current_pose_key, current_pose_global); - new_values.insert(current_vel_key, current_velocity_global); - new_values.insert(current_bias_key, current_bias); - new_factors.emplace_shared>(current_pose_key, current_pose_global, sigma_init_x); - new_factors.emplace_shared>(current_vel_key, current_velocity_global, sigma_init_v); - new_factors.emplace_shared>(current_bias_key, current_bias, sigma_init_b); - } else { - double t_previous = gps_measurements[i-1].time; + for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { + // At each non=IMU measurement we initialize a new node in the graph + auto current_pose_key = X(i); + auto current_vel_key = V(i); + auto current_bias_key = B(i); + double t = gps_measurements[i].time; - // Summarize IMU data between the previous GPS measurement and now - current_summarized_measurement = std::make_shared(imu_params, current_bias); - static size_t included_imu_measurement_count = 0; - while (j < imu_measurements.size() && imu_measurements[j].time <= t) { - if (imu_measurements[j].time >= t_previous) { - current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer, - imu_measurements[j].gyroscope, - imu_measurements[j].dt); - included_imu_measurement_count++; - } - j++; - } + if (i == first_gps_pose) { + // Create initial estimate and prior on initial pose, velocity, and biases + new_values.insert(current_pose_key, current_pose_global); + new_values.insert(current_vel_key, current_velocity_global); + new_values.insert(current_bias_key, current_bias); + new_factors.emplace_shared>( + current_pose_key, current_pose_global, sigma_init_x); + new_factors.emplace_shared>( + current_vel_key, current_velocity_global, sigma_init_v); + new_factors.emplace_shared>( + current_bias_key, current_bias, sigma_init_b); + } else { + double t_previous = gps_measurements[i - 1].time; - // Create IMU factor - auto previous_pose_key = X(i-1); - auto previous_vel_key = V(i-1); - auto previous_bias_key = B(i-1); + // Summarize IMU data between the previous GPS measurement and now + current_summarized_measurement = + std::make_shared(imu_params, + current_bias); - new_factors.emplace_shared(previous_pose_key, previous_vel_key, - current_pose_key, current_vel_key, - previous_bias_key, *current_summarized_measurement); - - // Bias evolution as given in the IMU metadata - auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() << - Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma), - Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma)) - .finished()); - new_factors.emplace_shared>(previous_bias_key, - current_bias_key, - imuBias::ConstantBias(), - sigma_between_b); - - // Create GPS factor - auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position); - if ((i % gps_skip) 
== 0) { - new_factors.emplace_shared>(current_pose_key, gps_pose, noise_model_gps); - new_values.insert(current_pose_key, gps_pose); - - printf("################ POSE INCLUDED AT TIME %lf ################\n", t); - cout << gps_pose.translation(); - printf("\n\n"); - } else { - new_values.insert(current_pose_key, current_pose_global); - } - - // Add initial values for velocity and bias based on the previous estimates - new_values.insert(current_vel_key, current_velocity_global); - new_values.insert(current_bias_key, current_bias); - - // Update solver - // ======================================================================= - // We accumulate 2*GPSskip GPS measurements before updating the solver at - // first so that the heading becomes observable. - if (i > (first_gps_pose + 2*gps_skip)) { - printf("################ NEW FACTORS AT TIME %lf ################\n", t); - new_factors.print(); - - isam.update(new_factors, new_values); - - // Reset the newFactors and newValues list - new_factors.resize(0); - new_values.clear(); - - // Extract the result/current estimates - Values result = isam.calculateEstimate(); - - current_pose_global = result.at(current_pose_key); - current_velocity_global = result.at(current_vel_key); - current_bias = result.at(current_bias_key); - - printf("\n################ POSE AT TIME %lf ################\n", t); - current_pose_global.print(); - printf("\n\n"); - } + while (j < imu_measurements.size() && imu_measurements[j].time <= t) { + if (imu_measurements[j].time >= t_previous) { + current_summarized_measurement->integrateMeasurement( + imu_measurements[j].accelerometer, imu_measurements[j].gyroscope, + imu_measurements[j].dt); + included_imu_measurement_count++; } + j++; + } + + // Create IMU factor + auto previous_pose_key = X(i - 1); + auto previous_vel_key = V(i - 1); + auto previous_bias_key = B(i - 1); + + new_factors.emplace_shared( + previous_pose_key, previous_vel_key, current_pose_key, + current_vel_key, previous_bias_key, *current_summarized_measurement); + + // Bias evolution as given in the IMU metadata + auto sigma_between_b = noiseModel::Diagonal::Sigmas( + (Vector6() << Vector3::Constant( + sqrt(included_imu_measurement_count) * + kitti_calibration.accelerometer_bias_sigma), + Vector3::Constant(sqrt(included_imu_measurement_count) * + kitti_calibration.gyroscope_bias_sigma)) + .finished()); + new_factors.emplace_shared>( + previous_bias_key, current_bias_key, imuBias::ConstantBias(), + sigma_between_b); + + // Create GPS factor + auto gps_pose = + Pose3(current_pose_global.rotation(), gps_measurements[i].position); + if ((i % gps_skip) == 0) { + new_factors.emplace_shared>( + current_pose_key, gps_pose, noise_model_gps); + new_values.insert(current_pose_key, gps_pose); + + printf("############ POSE INCLUDED AT TIME %.6lf ############\n", + t); + cout << gps_pose.translation(); + printf("\n\n"); + } else { + new_values.insert(current_pose_key, current_pose_global); + } + + // Add initial values for velocity and bias based on the previous + // estimates + new_values.insert(current_vel_key, current_velocity_global); + new_values.insert(current_bias_key, current_bias); + + // Update solver + // ======================================================================= + // We accumulate 2*GPSskip GPS measurements before updating the solver at + // first so that the heading becomes observable. 
+ if (i > (first_gps_pose + 2 * gps_skip)) { + printf("############ NEW FACTORS AT TIME %.6lf ############\n", + t); + new_factors.print(); + + isam.update(new_factors, new_values); + + // Reset the newFactors and newValues list + new_factors.resize(0); + new_values.clear(); + + // Extract the result/current estimates + Values result = isam.calculateEstimate(); + + current_pose_global = result.at(current_pose_key); + current_velocity_global = result.at(current_vel_key); + current_bias = result.at(current_bias_key); + + printf("\n############ POSE AT TIME %lf ############\n", t); + current_pose_global.print(); + printf("\n\n"); + } } + } - // Save results to file - printf("\nWriting results to file...\n"); - FILE* fp_out = fopen(output_filename.c_str(), "w+"); - fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n"); + // Save results to file + printf("\nWriting results to file...\n"); + FILE* fp_out = fopen(output_filename.c_str(), "w+"); + fprintf(fp_out, + "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n"); - Values result = isam.calculateEstimate(); - for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { - auto pose_key = X(i); - auto vel_key = V(i); - auto bias_key = B(i); + Values result = isam.calculateEstimate(); + for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { + auto pose_key = X(i); + auto vel_key = V(i); + auto bias_key = B(i); - auto pose = result.at(pose_key); - auto velocity = result.at(vel_key); - auto bias = result.at(bias_key); + auto pose = result.at(pose_key); + auto velocity = result.at(vel_key); + auto bias = result.at(bias_key); - auto pose_quat = pose.rotation().toQuaternion(); - auto gps = gps_measurements[i].position; + auto pose_quat = pose.rotation().toQuaternion(); + auto gps = gps_measurements[i].position; - cout << "State at #" << i << endl; - cout << "Pose:" << endl << pose << endl; - cout << "Velocity:" << endl << velocity << endl; - cout << "Bias:" << endl << bias << endl; + cout << "State at #" << i << endl; + cout << "Pose:" << endl << pose << endl; + cout << "Velocity:" << endl << velocity << endl; + cout << "Bias:" << endl << bias << endl; - fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", - gps_measurements[i].time, - pose.x(), pose.y(), pose.z(), - pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), - gps(0), gps(1), gps(2)); - } + fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", + gps_measurements[i].time, pose.x(), pose.y(), pose.z(), + pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0), + gps(1), gps(2)); + } - fclose(fp_out); + fclose(fp_out); } diff --git a/examples/UGM_chain.cpp b/examples/UGM_chain.cpp index 3a885a844..ababef022 100644 --- a/examples/UGM_chain.cpp +++ b/examples/UGM_chain.cpp @@ -70,8 +70,8 @@ int main(int argc, char** argv) { // "Decoding", i.e., configuration with largest value // We use sequential variable elimination DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); - DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); - optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n"); + auto optimalDecoding = chordal->optimize(); + optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n"); // "Inference" Computing marginals for each node // Here we'll make use of DiscreteMarginals class, which makes use of diff --git a/examples/UGM_small.cpp b/examples/UGM_small.cpp index 27a6205a3..f4f3f1fd0 100644 --- a/examples/UGM_small.cpp +++ 
b/examples/UGM_small.cpp @@ -63,8 +63,8 @@ int main(int argc, char** argv) { // "Decoding", i.e., configuration with largest value (MPE) // We use sequential variable elimination DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); - DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); - optimalDecoding->print("\noptimalDecoding"); + auto optimalDecoding = chordal->optimize(); + GTSAM_PRINT(optimalDecoding); // "Inference" Computing marginals cout << "\nComputing Node Marginals .." << endl; diff --git a/gtsam/base/Lie.h b/gtsam/base/Lie.h index ac7c2a9a5..cb8e7d017 100644 --- a/gtsam/base/Lie.h +++ b/gtsam/base/Lie.h @@ -370,4 +370,4 @@ public: * the gtsam namespace to be more easily enforced as testable */ #define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup; -#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup _gtsam_IsLieGroup_##T; +#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup; diff --git a/gtsam/base/Manifold.h b/gtsam/base/Manifold.h index dbe497005..962dc8269 100644 --- a/gtsam/base/Manifold.h +++ b/gtsam/base/Manifold.h @@ -178,4 +178,4 @@ struct FixedDimension { // * the gtsam namespace to be more easily enforced as testable // */ #define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold; -#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold _gtsam_IsManifold_##T; +#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold; diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h index 013947bbd..61c61a5af 100644 --- a/gtsam/base/Matrix.h +++ b/gtsam/base/Matrix.h @@ -46,28 +46,28 @@ typedef Eigen::Matrix M // Create handy typedefs and constants for square-size matrices // MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9 #define GTSAM_MAKE_MATRIX_DEFS(N) \ -typedef Eigen::Matrix Matrix##N; \ -typedef Eigen::Matrix Matrix1##N; \ -typedef Eigen::Matrix Matrix2##N; \ -typedef Eigen::Matrix Matrix3##N; \ -typedef Eigen::Matrix Matrix4##N; \ -typedef Eigen::Matrix Matrix5##N; \ -typedef Eigen::Matrix Matrix6##N; \ -typedef Eigen::Matrix Matrix7##N; \ -typedef Eigen::Matrix Matrix8##N; \ -typedef Eigen::Matrix Matrix9##N; \ +using Matrix##N = Eigen::Matrix; \ +using Matrix1##N = Eigen::Matrix; \ +using Matrix2##N = Eigen::Matrix; \ +using Matrix3##N = Eigen::Matrix; \ +using Matrix4##N = Eigen::Matrix; \ +using Matrix5##N = Eigen::Matrix; \ +using Matrix6##N = Eigen::Matrix; \ +using Matrix7##N = Eigen::Matrix; \ +using Matrix8##N = Eigen::Matrix; \ +using Matrix9##N = Eigen::Matrix; \ static const Eigen::MatrixBase::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \ static const Eigen::MatrixBase::ConstantReturnType Z_##N##x##N = Matrix##N::Zero(); -GTSAM_MAKE_MATRIX_DEFS(1); -GTSAM_MAKE_MATRIX_DEFS(2); -GTSAM_MAKE_MATRIX_DEFS(3); -GTSAM_MAKE_MATRIX_DEFS(4); -GTSAM_MAKE_MATRIX_DEFS(5); -GTSAM_MAKE_MATRIX_DEFS(6); -GTSAM_MAKE_MATRIX_DEFS(7); -GTSAM_MAKE_MATRIX_DEFS(8); -GTSAM_MAKE_MATRIX_DEFS(9); +GTSAM_MAKE_MATRIX_DEFS(1) +GTSAM_MAKE_MATRIX_DEFS(2) +GTSAM_MAKE_MATRIX_DEFS(3) +GTSAM_MAKE_MATRIX_DEFS(4) +GTSAM_MAKE_MATRIX_DEFS(5) +GTSAM_MAKE_MATRIX_DEFS(6) +GTSAM_MAKE_MATRIX_DEFS(7) +GTSAM_MAKE_MATRIX_DEFS(8) +GTSAM_MAKE_MATRIX_DEFS(9) // Matrix expressions for accessing parts of matrices typedef Eigen::Block SubMatrix; diff --git a/gtsam/base/Testable.h b/gtsam/base/Testable.h index 6062c7ae1..d50d62c1f 100644 --- a/gtsam/base/Testable.h +++ b/gtsam/base/Testable.h @@ -173,4 +173,4 @@ namespace gtsam { * @deprecated please use 
BOOST_CONCEPT_ASSERT and */ #define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable; -#define GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef gtsam::IsTestable _gtsam_Testable_##T; +#define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable; diff --git a/gtsam/base/TestableAssertions.h b/gtsam/base/TestableAssertions.h index 0e6e1c276..c86fbb6d2 100644 --- a/gtsam/base/TestableAssertions.h +++ b/gtsam/base/TestableAssertions.h @@ -85,7 +85,7 @@ bool assert_equal(const V& expected, const boost::optional& actual, do * \deprecated: use container equals instead */ template -bool assert_equal(const std::vector& expected, const std::vector& actual, double tol = 1e-9) { +bool GTSAM_DEPRECATED assert_equal(const std::vector& expected, const std::vector& actual, double tol = 1e-9) { bool match = true; if (expected.size() != actual.size()) match = false; diff --git a/gtsam/base/Vector.h b/gtsam/base/Vector.h index ed90a7126..a057da46b 100644 --- a/gtsam/base/Vector.h +++ b/gtsam/base/Vector.h @@ -48,18 +48,18 @@ static const Eigen::MatrixBase::ConstantReturnType Z_3x1 = Vector3::Zer // Create handy typedefs and constants for vectors with N>3 // VectorN and Z_Nx1, for N=1..9 #define GTSAM_MAKE_VECTOR_DEFS(N) \ - typedef Eigen::Matrix Vector##N; \ + using Vector##N = Eigen::Matrix; \ static const Eigen::MatrixBase::ConstantReturnType Z_##N##x1 = Vector##N::Zero(); -GTSAM_MAKE_VECTOR_DEFS(4); -GTSAM_MAKE_VECTOR_DEFS(5); -GTSAM_MAKE_VECTOR_DEFS(6); -GTSAM_MAKE_VECTOR_DEFS(7); -GTSAM_MAKE_VECTOR_DEFS(8); -GTSAM_MAKE_VECTOR_DEFS(9); -GTSAM_MAKE_VECTOR_DEFS(10); -GTSAM_MAKE_VECTOR_DEFS(11); -GTSAM_MAKE_VECTOR_DEFS(12); +GTSAM_MAKE_VECTOR_DEFS(4) +GTSAM_MAKE_VECTOR_DEFS(5) +GTSAM_MAKE_VECTOR_DEFS(6) +GTSAM_MAKE_VECTOR_DEFS(7) +GTSAM_MAKE_VECTOR_DEFS(8) +GTSAM_MAKE_VECTOR_DEFS(9) +GTSAM_MAKE_VECTOR_DEFS(10) +GTSAM_MAKE_VECTOR_DEFS(11) +GTSAM_MAKE_VECTOR_DEFS(12) typedef Eigen::VectorBlock SubVector; typedef Eigen::VectorBlock ConstSubVector; @@ -207,14 +207,14 @@ inline double inner_prod(const V1 &a, const V2& b) { * BLAS Level 1 scal: x <- alpha*x * \deprecated: use operators instead */ -inline void scal(double alpha, Vector& x) { x *= alpha; } +inline void GTSAM_DEPRECATED scal(double alpha, Vector& x) { x *= alpha; } /** * BLAS Level 1 axpy: y <- alpha*x + y * \deprecated: use operators instead */ template -inline void axpy(double alpha, const V1& x, V2& y) { +inline void GTSAM_DEPRECATED axpy(double alpha, const V1& x, V2& y) { assert (y.size()==x.size()); y += alpha * x; } diff --git a/gtsam/base/chartTesting.h b/gtsam/base/chartTesting.h index f63054a5b..8f5213f91 100644 --- a/gtsam/base/chartTesting.h +++ b/gtsam/base/chartTesting.h @@ -32,7 +32,7 @@ void testDefaultChart(TestResult& result_, const std::string& name_, const T& value) { - GTSAM_CONCEPT_TESTABLE_TYPE(T); + GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef typename gtsam::DefaultChart Chart; typedef typename Chart::vector Vector; diff --git a/gtsam/base/serialization.h b/gtsam/base/serialization.h index f589ecc5e..24355c684 100644 --- a/gtsam/base/serialization.h +++ b/gtsam/base/serialization.h @@ -19,8 +19,9 @@ #pragma once -#include +#include #include +#include #include // includes for standard serialization types @@ -40,6 +41,17 @@ #include #include +// Workaround a bug in GCC >= 7 and C++17 +// ref. 
https://gitlab.com/libeigen/eigen/-/issues/1676 +#ifdef __GNUC__ +#if __GNUC__ >= 7 && __cplusplus >= 201703L +namespace boost { namespace serialization { struct U; } } +namespace Eigen { namespace internal { +template<> struct traits {enum {Flags=0};}; +} } +#endif +#endif + namespace gtsam { /** @name Standard serialization diff --git a/gtsam/base/tests/testVector.cpp b/gtsam/base/tests/testVector.cpp index bd715e3cb..c87732b09 100644 --- a/gtsam/base/tests/testVector.cpp +++ b/gtsam/base/tests/testVector.cpp @@ -220,8 +220,8 @@ TEST(Vector, axpy ) Vector x = Vector3(10., 20., 30.); Vector y0 = Vector3(2.0, 5.0, 6.0); Vector y1 = y0, y2 = y0; - axpy(0.1,x,y1); - axpy(0.1,x,y2.head(3)); + y1 += 0.1 * x; + y2.head(3) += 0.1 * x; Vector expected = Vector3(3.0, 7.0, 9.0); EXPECT(assert_equal(expected,y1)); EXPECT(assert_equal(expected,Vector(y2))); diff --git a/gtsam/base/treeTraversal-inst.h b/gtsam/base/treeTraversal-inst.h index 7a88f72eb..30cec3b9a 100644 --- a/gtsam/base/treeTraversal-inst.h +++ b/gtsam/base/treeTraversal-inst.h @@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData, // Typedefs typedef typename FOREST::Node Node; - tbb::task::spawn_root_and_wait( - internal::CreateRootTask(forest.roots(), rootData, visitorPre, - visitorPost, problemSizeThreshold)); + internal::CreateRootTask(forest.roots(), rootData, visitorPre, + visitorPost, problemSizeThreshold); #else DepthFirstForest(forest, rootData, visitorPre, visitorPost); #endif diff --git a/gtsam/base/treeTraversal/parallelTraversalTasks.h b/gtsam/base/treeTraversal/parallelTraversalTasks.h index 87d5b0d4c..dc1b45906 100644 --- a/gtsam/base/treeTraversal/parallelTraversalTasks.h +++ b/gtsam/base/treeTraversal/parallelTraversalTasks.h @@ -22,7 +22,7 @@ #include #ifdef GTSAM_USE_TBB -#include // tbb::task, tbb::task_list +#include // tbb::task_group #include // tbb::scalable_allocator namespace gtsam { @@ -34,7 +34,7 @@ namespace gtsam { /* ************************************************************************* */ template - class PreOrderTask : public tbb::task + class PreOrderTask { public: const boost::shared_ptr& treeNode; @@ -42,28 +42,30 @@ namespace gtsam { VISITOR_PRE& visitorPre; VISITOR_POST& visitorPost; int problemSizeThreshold; + tbb::task_group& tg; bool makeNewTasks; - bool isPostOrderPhase; + // Keep track of order phase across multiple calls to the same functor + mutable bool isPostOrderPhase; PreOrderTask(const boost::shared_ptr& treeNode, const boost::shared_ptr& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold, - bool makeNewTasks = true) + tbb::task_group& tg, bool makeNewTasks = true) : treeNode(treeNode), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost), problemSizeThreshold(problemSizeThreshold), + tg(tg), makeNewTasks(makeNewTasks), isPostOrderPhase(false) {} - tbb::task* execute() override + void operator()() const { if(isPostOrderPhase) { // Run the post-order visitor since this task was recycled to run the post-order visitor (void) visitorPost(treeNode, *myData); - return nullptr; } else { @@ -71,14 +73,10 @@ namespace gtsam { { if(!treeNode->children.empty()) { - // Allocate post-order task as a continuation - isPostOrderPhase = true; - recycle_as_continuation(); - bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold); - tbb::task* firstChild = 0; - tbb::task_list childTasks; + // If we have child tasks, start subtasks and wait for them to complete + tbb::task_group ctg; for(const boost::shared_ptr& 
child: treeNode->children) { // Process child in a subtask. Important: Run visitorPre before calling @@ -86,37 +84,30 @@ namespace gtsam { // allocated an extra child, this causes a TBB error. boost::shared_ptr childData = boost::allocate_shared( tbb::scalable_allocator(), visitorPre(child, *myData)); - tbb::task* childTask = - new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost, - problemSizeThreshold, overThreshold); - if (firstChild) - childTasks.push_back(*childTask); - else - firstChild = childTask; + ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost, + problemSizeThreshold, ctg, overThreshold)); } + ctg.wait(); - // If we have child tasks, start subtasks and wait for them to complete - set_ref_count((int)treeNode->children.size()); - spawn(childTasks); - return firstChild; + // Allocate post-order task as a continuation + isPostOrderPhase = true; + tg.run(*this); } else { // Run the post-order visitor in this task if we have no children (void) visitorPost(treeNode, *myData); - return nullptr; } } else { // Process this node and its children in this task processNodeRecursively(treeNode, *myData); - return nullptr; } } } - void processNodeRecursively(const boost::shared_ptr& node, DATA& myData) + void processNodeRecursively(const boost::shared_ptr& node, DATA& myData) const { for(const boost::shared_ptr& child: node->children) { @@ -131,7 +122,7 @@ namespace gtsam { /* ************************************************************************* */ template - class RootTask : public tbb::task + class RootTask { public: const ROOTS& roots; @@ -139,38 +130,31 @@ namespace gtsam { VISITOR_PRE& visitorPre; VISITOR_POST& visitorPost; int problemSizeThreshold; + tbb::task_group& tg; RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, - int problemSizeThreshold) : + int problemSizeThreshold, tbb::task_group& tg) : roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost), - problemSizeThreshold(problemSizeThreshold) {} + problemSizeThreshold(problemSizeThreshold), tg(tg) {} - tbb::task* execute() override + void operator()() const { typedef PreOrderTask PreOrderTask; // Create data and tasks for our children - tbb::task_list tasks; for(const boost::shared_ptr& root: roots) { boost::shared_ptr rootData = boost::allocate_shared(tbb::scalable_allocator(), visitorPre(root, myData)); - tasks.push_back(*new(allocate_child()) - PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold)); + tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg)); } - // Set TBB ref count - set_ref_count(1 + (int) roots.size()); - // Spawn tasks - spawn_and_wait_for_all(tasks); - // Return nullptr - return nullptr; } }; template - RootTask& - CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold) + void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold) { typedef RootTask RootTask; - return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold); - } + tbb::task_group tg; + tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg)); + } } diff --git a/gtsam/base/types.h b/gtsam/base/types.h index aaada3cee..a0d24f1a6 100644 --- a/gtsam/base/types.h +++ b/gtsam/base/types.h @@ -34,6 +34,14 @@ #include #endif +#if defined(__GNUC__) || 
defined(__clang__) +#define GTSAM_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define GTSAM_DEPRECATED __declspec(deprecated) +#else +#define GTSAM_DEPRECATED +#endif + #ifdef GTSAM_USE_EIGEN_MKL_OPENMP #include #endif diff --git a/gtsam/config.h.in b/gtsam/config.h.in index 9f7106187..e7623c52b 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -80,3 +80,6 @@ // Whether to use the system installed Metis instead of the provided one #cmakedefine GTSAM_USE_SYSTEM_METIS + +// Toggle switch for BetweenFactor jacobian computation +#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR diff --git a/gtsam/discrete/DiscreteBayesNet.cpp b/gtsam/discrete/DiscreteBayesNet.cpp index 84a80c565..71c50c477 100644 --- a/gtsam/discrete/DiscreteBayesNet.cpp +++ b/gtsam/discrete/DiscreteBayesNet.cpp @@ -54,20 +54,20 @@ namespace gtsam { } /* ************************************************************************* */ - DiscreteFactor::sharedValues DiscreteBayesNet::optimize() const { + DiscreteFactor::Values DiscreteBayesNet::optimize() const { // solve each node in turn in topological sort order (parents first) - DiscreteFactor::sharedValues result(new DiscreteFactor::Values()); + DiscreteFactor::Values result; for (auto conditional: boost::adaptors::reverse(*this)) - conditional->solveInPlace(*result); + conditional->solveInPlace(&result); return result; } /* ************************************************************************* */ - DiscreteFactor::sharedValues DiscreteBayesNet::sample() const { + DiscreteFactor::Values DiscreteBayesNet::sample() const { // sample each node in turn in topological sort order (parents first) - DiscreteFactor::sharedValues result(new DiscreteFactor::Values()); + DiscreteFactor::Values result; for (auto conditional: boost::adaptors::reverse(*this)) - conditional->sampleInPlace(*result); + conditional->sampleInPlace(&result); return result; } diff --git a/gtsam/discrete/DiscreteBayesNet.h b/gtsam/discrete/DiscreteBayesNet.h index d5ba30584..e89645658 100644 --- a/gtsam/discrete/DiscreteBayesNet.h +++ b/gtsam/discrete/DiscreteBayesNet.h @@ -83,10 +83,10 @@ namespace gtsam { /** * Solve the DiscreteBayesNet by back-substitution */ - DiscreteFactor::sharedValues optimize() const; + DiscreteFactor::Values optimize() const; /** Do ancestral sampling */ - DiscreteFactor::sharedValues sample() const; + DiscreteFactor::Values sample() const; ///@} diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index ac7c58405..26a43a10c 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -35,7 +35,7 @@ using namespace std; namespace gtsam { // Instantiate base class -template class Conditional ; +template class GTSAM_EXPORT Conditional ; /* ******************************************************************************** */ DiscreteConditional::DiscreteConditional(const size_t nrFrontals, @@ -117,9 +117,9 @@ Potentials::ADT DiscreteConditional::choose(const Values& parentsValues) const { } /* ******************************************************************************** */ -void DiscreteConditional::solveInPlace(Values& values) const { +void DiscreteConditional::solveInPlace(Values* values) const { // TODO: Abhijit asks: is this really the fastest way? He thinks it is. 
- ADT pFS = choose(values); // P(F|S=parentsValues) + ADT pFS = choose(*values); // P(F|S=parentsValues) // Initialize Values mpe; @@ -145,16 +145,16 @@ void DiscreteConditional::solveInPlace(Values& values) const { //set values (inPlace) to mpe for(Key j: frontals()) { - values[j] = mpe[j]; + (*values)[j] = mpe[j]; } } /* ******************************************************************************** */ -void DiscreteConditional::sampleInPlace(Values& values) const { +void DiscreteConditional::sampleInPlace(Values* values) const { assert(nrFrontals() == 1); Key j = (firstFrontalKey()); - size_t sampled = sample(values); // Sample variable - values[j] = sampled; // store result in partial solution + size_t sampled = sample(*values); // Sample variable given parents + (*values)[j] = sampled; // store result in partial solution } /* ******************************************************************************** */ diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 8299fab2c..191a80fb0 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ b/gtsam/discrete/DiscreteConditional.h @@ -45,7 +45,6 @@ public: /** A map from keys to values.. * TODO: Again, do we need this??? */ typedef Assignment Values; - typedef boost::shared_ptr sharedValues; /// @name Standard Constructors /// @{ @@ -133,10 +132,10 @@ public: /// @{ /// solve a conditional, in place - void solveInPlace(Values& parentsValues) const; + void solveInPlace(Values* parentsValues) const; /// sample in place, stores result in partial solution - void sampleInPlace(Values& parentsValues) const; + void sampleInPlace(Values* parentsValues) const; /// @} diff --git a/gtsam/discrete/DiscreteFactor.h b/gtsam/discrete/DiscreteFactor.h index 6b0919507..cd883c59c 100644 --- a/gtsam/discrete/DiscreteFactor.h +++ b/gtsam/discrete/DiscreteFactor.h @@ -51,7 +51,6 @@ public: * the new class DiscreteValue, as the varible's type (domain) */ typedef Assignment Values; - typedef boost::shared_ptr sharedValues; public: diff --git a/gtsam/discrete/DiscreteFactorGraph.cpp b/gtsam/discrete/DiscreteFactorGraph.cpp index e41968d6b..4ff0e339e 100644 --- a/gtsam/discrete/DiscreteFactorGraph.cpp +++ b/gtsam/discrete/DiscreteFactorGraph.cpp @@ -94,7 +94,7 @@ namespace gtsam { // } /* ************************************************************************* */ - DiscreteFactor::sharedValues DiscreteFactorGraph::optimize() const + DiscreteFactorGraph::Values DiscreteFactorGraph::optimize() const { gttic(DiscreteFactorGraph_optimize); return BaseEliminateable::eliminateSequential()->optimize(); diff --git a/gtsam/discrete/DiscreteFactorGraph.h b/gtsam/discrete/DiscreteFactorGraph.h index f39adc9a8..329d015e9 100644 --- a/gtsam/discrete/DiscreteFactorGraph.h +++ b/gtsam/discrete/DiscreteFactorGraph.h @@ -74,7 +74,6 @@ public: /** A map from keys to values */ typedef KeyVector Indices; typedef Assignment Values; - typedef boost::shared_ptr sharedValues; /** Default constructor */ DiscreteFactorGraph() {} @@ -101,25 +100,27 @@ public: /// @} - template + // Add single key decision-tree factor. + template void add(const DiscreteKey& j, SOURCE table) { DiscreteKeys keys; keys.push_back(j); - push_back(boost::make_shared(keys, table)); + emplace_shared(keys, table); } - template + // Add binary key decision-tree factor. 
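  // (As with the unary overload above, the factor is constructed in place from
  // (keys, table) via emplace_shared rather than through a separate
  // boost::make_shared call.)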
+ template void add(const DiscreteKey& j1, const DiscreteKey& j2, SOURCE table) { DiscreteKeys keys; keys.push_back(j1); keys.push_back(j2); - push_back(boost::make_shared(keys, table)); + emplace_shared(keys, table); } - /** add shared discreteFactor immediately from arguments */ - template + // Add shared discreteFactor immediately from arguments. + template void add(const DiscreteKeys& keys, SOURCE table) { - push_back(boost::make_shared(keys, table)); + emplace_shared(keys, table); } /** Return the set of variables involved in the factors (set union) */ @@ -140,7 +141,7 @@ public: * the dense elimination function specified in \c function, * followed by back-substitution resulting from elimination. Is equivalent * to calling graph.eliminateSequential()->optimize(). */ - DiscreteFactor::sharedValues optimize() const; + Values optimize() const; // /** Permute the variables in the factors */ diff --git a/gtsam/discrete/tests/testDiscreteBayesNet.cpp b/gtsam/discrete/tests/testDiscreteBayesNet.cpp index 2b440e5a0..f6159c0c6 100644 --- a/gtsam/discrete/tests/testDiscreteBayesNet.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesNet.cpp @@ -104,12 +104,12 @@ TEST(DiscreteBayesNet, Asia) { EXPECT(assert_equal(expected2, *chordal->back())); // solve - DiscreteFactor::sharedValues actualMPE = chordal->optimize(); + auto actualMPE = chordal->optimize(); DiscreteFactor::Values expectedMPE; insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)( Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)( LungCancer.first, 0)(Bronchitis.first, 0); - EXPECT(assert_equal(expectedMPE, *actualMPE)); + EXPECT(assert_equal(expectedMPE, actualMPE)); // add evidence, we were in Asia and we have dyspnea fg.add(Asia, "0 1"); @@ -117,12 +117,12 @@ TEST(DiscreteBayesNet, Asia) { // solve again, now with evidence DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering); - DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize(); + auto actualMPE2 = chordal2->optimize(); DiscreteFactor::Values expectedMPE2; insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)( Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)( LungCancer.first, 0)(Bronchitis.first, 1); - EXPECT(assert_equal(expectedMPE2, *actualMPE2)); + EXPECT(assert_equal(expectedMPE2, actualMPE2)); // now sample from it DiscreteFactor::Values expectedSample; @@ -130,8 +130,8 @@ TEST(DiscreteBayesNet, Asia) { insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)( Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)( LungCancer.first, 1)(Bronchitis.first, 0); - DiscreteFactor::sharedValues actualSample = chordal2->sample(); - EXPECT(assert_equal(expectedSample, *actualSample)); + auto actualSample = chordal2->sample(); + EXPECT(assert_equal(expectedSample, actualSample)); } /* ************************************************************************* */ diff --git a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp index 1defd5acf..6b7a43c1c 100644 --- a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp +++ b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp @@ -169,8 +169,8 @@ TEST( DiscreteFactorGraph, test) // Test optimization DiscreteFactor::Values expectedValues; insert(expectedValues)(0, 0)(1, 0)(2, 0); - DiscreteFactor::sharedValues actualValues = graph.optimize(); - EXPECT(assert_equal(expectedValues, *actualValues)); + auto actualValues = graph.optimize(); + EXPECT(assert_equal(expectedValues, actualValues)); } /* 
************************************************************************* */ @@ -186,11 +186,11 @@ TEST( DiscreteFactorGraph, testMPE) // graph.product().print(); // DiscreteSequentialSolver(graph).eliminate()->print(); - DiscreteFactor::sharedValues actualMPE = graph.optimize(); + auto actualMPE = graph.optimize(); DiscreteFactor::Values expectedMPE; insert(expectedMPE)(0, 0)(1, 1)(2, 1); - EXPECT(assert_equal(expectedMPE, *actualMPE)); + EXPECT(assert_equal(expectedMPE, actualMPE)); } /* ************************************************************************* */ @@ -216,8 +216,8 @@ TEST( DiscreteFactorGraph, testMPE_Darwiche09book_p244) // Use the solver machinery. DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); - DiscreteFactor::sharedValues actualMPE = chordal->optimize(); - EXPECT(assert_equal(expectedMPE, *actualMPE)); + auto actualMPE = chordal->optimize(); + EXPECT(assert_equal(expectedMPE, actualMPE)); // DiscreteConditional::shared_ptr root = chordal->back(); // EXPECT_DOUBLES_EQUAL(0.4, (*root)(*actualMPE), 1e-9); @@ -244,8 +244,8 @@ ETree::shared_ptr eTree = ETree::Create(graph, structure); // eliminate normally and check solution DiscreteBayesNet::shared_ptr bayesNet = eTree->eliminate(&EliminateDiscrete); // bayesNet->print(">>>>>>>>>>>>>> Bayes Net <<<<<<<<<<<<<<<<<<"); -DiscreteFactor::sharedValues actualMPE = optimize(*bayesNet); -EXPECT(assert_equal(expectedMPE, *actualMPE)); +auto actualMPE = optimize(*bayesNet); +EXPECT(assert_equal(expectedMPE, actualMPE)); // Approximate and check solution // DiscreteBayesNet::shared_ptr approximateNet = eTree->approximate(); diff --git a/gtsam/geometry/Cal3Fisheye.cpp b/gtsam/geometry/Cal3Fisheye.cpp index 52d475d5d..fd2c7ab65 100644 --- a/gtsam/geometry/Cal3Fisheye.cpp +++ b/gtsam/geometry/Cal3Fisheye.cpp @@ -46,9 +46,9 @@ double Cal3Fisheye::Scaling(double r) { /* ************************************************************************* */ Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1, OptionalJacobian<2, 2> H2) const { - const double xi = p.x(), yi = p.y(); + const double xi = p.x(), yi = p.y(), zi = 1; const double r2 = xi * xi + yi * yi, r = sqrt(r2); - const double t = atan(r); + const double t = atan2(r, zi); const double t2 = t * t, t4 = t2 * t2, t6 = t2 * t4, t8 = t4 * t4; Vector5 K, T; K << 1, k1_, k2_, k3_, k4_; @@ -76,28 +76,32 @@ Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1, // Derivative for points in intrinsic coords (2 by 2) if (H2) { - const double dtd_dt = - 1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8; - const double dt_dr = 1 / (1 + r2); - const double rinv = 1 / r; - const double dr_dxi = xi * rinv; - const double dr_dyi = yi * rinv; - const double dtd_dxi = dtd_dt * dt_dr * dr_dxi; - const double dtd_dyi = dtd_dt * dt_dr * dr_dyi; + if (r2==0) { + *H2 = DK; + } else { + const double dtd_dt = + 1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8; + const double R2 = r2 + zi*zi; + const double dt_dr = zi / R2; + const double rinv = 1 / r; + const double dr_dxi = xi * rinv; + const double dr_dyi = yi * rinv; + const double dtd_dr = dtd_dt * dt_dr; + + const double c2 = dr_dxi * dr_dxi; + const double s2 = dr_dyi * dr_dyi; + const double cs = dr_dxi * dr_dyi; - const double td = t * K.dot(T); - const double rrinv = 1 / r2; - const double dxd_dxi = - dtd_dxi * dr_dxi + td * rinv - td * xi * rrinv * dr_dxi; - const double dxd_dyi = dtd_dyi * dr_dxi - td * xi * rrinv * dr_dyi; - const double dyd_dxi = dtd_dxi * 
dr_dyi - td * yi * rrinv * dr_dxi; - const double dyd_dyi = - dtd_dyi * dr_dyi + td * rinv - td * yi * rrinv * dr_dyi; + const double dxd_dxi = dtd_dr * c2 + s * (1 - c2); + const double dxd_dyi = (dtd_dr - s) * cs; + const double dyd_dxi = dxd_dyi; + const double dyd_dyi = dtd_dr * s2 + s * (1 - s2); - Matrix2 DR; - DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi; + Matrix2 DR; + DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi; - *H2 = DK * DR; + *H2 = DK * DR; + } } return uv; diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index c1f0b6b3f..61e9f0909 100644 --- a/gtsam/geometry/PinholeCamera.h +++ b/gtsam/geometry/PinholeCamera.h @@ -312,6 +312,16 @@ public: return range(camera.pose(), Dcamera, Dother); } + /// for Linear Triangulation + Matrix34 cameraProjectionMatrix() const { + return K_.K() * PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4); + } + + /// for Nonlinear Triangulation + Vector defaultErrorWhenTriangulatingBehindCamera() const { + return Eigen::Matrix::dimension,1>::Constant(2.0 * K_.fx());; + } + private: /** Serialization function */ diff --git a/gtsam/geometry/PinholePose.h b/gtsam/geometry/PinholePose.h index cc6435a58..b4999af7c 100644 --- a/gtsam/geometry/PinholePose.h +++ b/gtsam/geometry/PinholePose.h @@ -35,7 +35,7 @@ class GTSAM_EXPORT PinholeBaseK: public PinholeBase { private: - GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION); + GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION) // Get dimensions of calibration type at compile time static const int DimK = FixedDimension::value; @@ -121,6 +121,13 @@ public: return _project(pw, Dpose, Dpoint, Dcal); } + /// project a 3D point from world coordinates into the image + Point2 reprojectionError(const Point3& pw, const Point2& measured, OptionalJacobian<2, 6> Dpose = boost::none, + OptionalJacobian<2, 3> Dpoint = boost::none, + OptionalJacobian<2, DimK> Dcal = boost::none) const { + return Point2(_project(pw, Dpose, Dpoint, Dcal) - measured); + } + /// project a point at infinity from world coordinates into the image Point2 project(const Unit3& pw, OptionalJacobian<2, 6> Dpose = boost::none, OptionalJacobian<2, 2> Dpoint = boost::none, @@ -159,7 +166,6 @@ public: return result; } - /// backproject a 2-dimensional point to a 3-dimensional point at infinity Unit3 backprojectPointAtInfinity(const Point2& p) const { const Point2 pn = calibration().calibrate(p); @@ -410,6 +416,16 @@ public: return PinholePose(); // assumes that the default constructor is valid } + /// for Linear Triangulation + Matrix34 cameraProjectionMatrix() const { + Matrix34 P = Matrix34(PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4)); + return K_->K() * P; + } + + /// for Nonlinear Triangulation + Vector defaultErrorWhenTriangulatingBehindCamera() const { + return Eigen::Matrix::dimension,1>::Constant(2.0 * K_->fx());; + } /// @} private: diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 8dafffee8..a0b3d502e 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -28,7 +28,7 @@ using namespace std; namespace gtsam { /** instantiate concept checks */ -GTSAM_CONCEPT_POSE_INST(Pose2); +GTSAM_CONCEPT_POSE_INST(Pose2) static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.)); diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index c183e32ed..1e399ad18 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -30,7 +30,7 @@ using std::vector; using Point3Pairs = vector; /** instantiate concept checks */ -GTSAM_CONCEPT_POSE_INST(Pose3); +GTSAM_CONCEPT_POSE_INST(Pose3) /* 
************************************************************************* */ Pose3::Pose3(const Pose2& pose2) : @@ -63,6 +63,46 @@ Matrix6 Pose3::AdjointMap() const { return adj; } +/* ************************************************************************* */ +// Calculate AdjointMap applied to xi_b, with Jacobians +Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose, + OptionalJacobian<6, 6> H_xib) const { + const Matrix6 Ad = AdjointMap(); + + // Jacobians + // D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b + // D2 Ad_T(xi_b) = Ad_T + // See docs/math.pdf for more details. + // In D1 calculation, we could be more efficient by writing it out, but do not + // for readability + if (H_pose) *H_pose = -Ad * adjointMap(xi_b); + if (H_xib) *H_xib = Ad; + + return Ad * xi_b; +} + +/* ************************************************************************* */ +/// The dual version of Adjoint +Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose, + OptionalJacobian<6, 6> H_x) const { + const Matrix6 Ad = AdjointMap(); + const Vector6 AdTx = Ad.transpose() * x; + + // Jacobians + // See docs/math.pdf for more details. + if (H_pose) { + const auto w_T_hat = skewSymmetric(AdTx.head<3>()), + v_T_hat = skewSymmetric(AdTx.tail<3>()); + *H_pose << w_T_hat, v_T_hat, // + /* */ v_T_hat, Z_3x3; + } + if (H_x) { + *H_x = Ad.transpose(); + } + + return AdTx; +} + /* ************************************************************************* */ Matrix6 Pose3::adjointMap(const Vector6& xi) { Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2)); @@ -75,7 +115,7 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) { /* ************************************************************************* */ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y, - OptionalJacobian<6, 6> Hxi) { + OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) { if (Hxi) { Hxi->setZero(); for (int i = 0; i < 6; ++i) { @@ -86,12 +126,14 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y, Hxi->col(i) = Gi * y; } } - return adjointMap(xi) * y; + const Matrix6& ad_xi = adjointMap(xi); + if (H_y) *H_y = ad_xi; + return ad_xi * y; } /* ************************************************************************* */ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y, - OptionalJacobian<6, 6> Hxi) { + OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) { if (Hxi) { Hxi->setZero(); for (int i = 0; i < 6; ++i) { @@ -102,7 +144,9 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y, Hxi->col(i) = GTi * y; } } - return adjointMap(xi).transpose() * y; + const Matrix6& adT_xi = adjointMap(xi).transpose(); + if (H_y) *H_y = adT_xi; + return adT_xi * y; } /* ************************************************************************* */ diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index d76e1b41a..884d0760c 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -145,15 +145,22 @@ public: * Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame * Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi) */ - Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect + Matrix6 AdjointMap() const; /** - * Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame + * Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. 
a + * body-fixed velocity, transforming it to the spatial frame * \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$ + * Note that H_xib = AdjointMap() */ - Vector6 Adjoint(const Vector6& xi_b) const { - return AdjointMap() * xi_b; - } /// FIXME Not tested - marked as incorrect + Vector6 Adjoint(const Vector6& xi_b, + OptionalJacobian<6, 6> H_this = boost::none, + OptionalJacobian<6, 6> H_xib = boost::none) const; + + /// The dual version of Adjoint + Vector6 AdjointTranspose(const Vector6& x, + OptionalJacobian<6, 6> H_this = boost::none, + OptionalJacobian<6, 6> H_x = boost::none) const; /** * Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11 @@ -170,13 +177,14 @@ public: * and its inverse transpose in the discrete Euler Poincare' (DEP) operator. * */ - static Matrix6 adjointMap(const Vector6 &xi); + static Matrix6 adjointMap(const Vector6& xi); /** * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives */ - static Vector6 adjoint(const Vector6 &xi, const Vector6 &y, - OptionalJacobian<6, 6> Hxi = boost::none); + static Vector6 adjoint(const Vector6& xi, const Vector6& y, + OptionalJacobian<6, 6> Hxi = boost::none, + OptionalJacobian<6, 6> H_y = boost::none); // temporary fix for wrappers until case issue is resolved static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);} @@ -186,7 +194,8 @@ public: * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space. */ static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y, - OptionalJacobian<6, 6> Hxi = boost::none); + OptionalJacobian<6, 6> Hxi = boost::none, + OptionalJacobian<6, 6> H_y = boost::none); /// Derivative of Expmap static Matrix6 ExpmapDerivative(const Vector6& xi); diff --git a/gtsam/geometry/Quaternion.h b/gtsam/geometry/Quaternion.h index 1557a09db..2ef47d58e 100644 --- a/gtsam/geometry/Quaternion.h +++ b/gtsam/geometry/Quaternion.h @@ -117,13 +117,23 @@ struct traits { omega = (-8. / 3. - 2. / 3. * qw) * q.vec(); } else { // Normal, away from zero case - _Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw); - // Important: convert to [-pi,pi] to keep error continuous - if (angle > M_PI) - angle -= twoPi; - else if (angle < -M_PI) - angle += twoPi; - omega = (angle / s) * q.vec(); + if (qw > 0) { + _Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw); + // Important: convert to [-pi,pi] to keep error continuous + if (angle > M_PI) + angle -= twoPi; + else if (angle < -M_PI) + angle += twoPi; + omega = (angle / s) * q.vec(); + } else { + // Make sure that we are using a canonical quaternion with w > 0 + _Scalar angle = 2 * acos(-qw), s = sqrt(1 - qw * qw); + if (angle > M_PI) + angle -= twoPi; + else if (angle < -M_PI) + angle += twoPi; + omega = (angle / s) * -q.vec(); + } } if(H) *H = SO3::LogmapDerivative(omega.template cast()); diff --git a/gtsam/geometry/SO3.cpp b/gtsam/geometry/SO3.cpp index 80c0171ad..2585c37be 100644 --- a/gtsam/geometry/SO3.cpp +++ b/gtsam/geometry/SO3.cpp @@ -261,25 +261,59 @@ Vector3 SO3::Logmap(const SO3& Q, ChartJacobian H) { // when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc. 
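  // Near theta = pi the generic formula below, omega = theta/(2 sin(theta)) *
  // vex(R - R^T), is ill-conditioned: R is nearly symmetric, so both the
  // off-diagonal differences and sin(theta) go to zero together. The branch
  // that follows therefore recovers the axis from the largest diagonal entry
  // (a quaternion-style extraction), which keeps the computation well scaled.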
// we do something special - if (tr + 1.0 < 1e-10) { - if (std::abs(R33 + 1.0) > 1e-5) - omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Vector3(R13, R23, 1.0 + R33); - else if (std::abs(R22 + 1.0) > 1e-5) - omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Vector3(R12, 1.0 + R22, R32); - else - // if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit - omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Vector3(1.0 + R11, R21, R31); + if (tr + 1.0 < 1e-3) { + if (R33 > R22 && R33 > R11) { + // R33 is the largest diagonal, a=3, b=1, c=2 + const double W = R21 - R12; + const double Q1 = 2.0 + 2.0 * R33; + const double Q2 = R31 + R13; + const double Q3 = R23 + R32; + const double r = sqrt(Q1); + const double one_over_r = 1 / r; + const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W); + const double sgn_w = W < 0 ? -1.0 : 1.0; + const double mag = M_PI - (2 * sgn_w * W) / norm; + const double scale = 0.5 * one_over_r * mag; + omega = sgn_w * scale * Vector3(Q2, Q3, Q1); + } else if (R22 > R11) { + // R22 is the largest diagonal, a=2, b=3, c=1 + const double W = R13 - R31; + const double Q1 = 2.0 + 2.0 * R22; + const double Q2 = R23 + R32; + const double Q3 = R12 + R21; + const double r = sqrt(Q1); + const double one_over_r = 1 / r; + const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W); + const double sgn_w = W < 0 ? -1.0 : 1.0; + const double mag = M_PI - (2 * sgn_w * W) / norm; + const double scale = 0.5 * one_over_r * mag; + omega = sgn_w * scale * Vector3(Q3, Q1, Q2); + } else { + // R11 is the largest diagonal, a=1, b=2, c=3 + const double W = R32 - R23; + const double Q1 = 2.0 + 2.0 * R11; + const double Q2 = R12 + R21; + const double Q3 = R31 + R13; + const double r = sqrt(Q1); + const double one_over_r = 1 / r; + const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W); + const double sgn_w = W < 0 ? -1.0 : 1.0; + const double mag = M_PI - (2 * sgn_w * W) / norm; + const double scale = 0.5 * one_over_r * mag; + omega = sgn_w * scale * Vector3(Q1, Q2, Q3); + } } else { double magnitude; - const double tr_3 = tr - 3.0; // always negative - if (tr_3 < -1e-7) { + const double tr_3 = tr - 3.0; // could be non-negative if the matrix is off orthogonal + if (tr_3 < -1e-6) { + // this is the normal case -1 < trace < 3 double theta = acos((tr - 1.0) / 2.0); magnitude = theta / (2.0 * sin(theta)); } else { // when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0) // use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2) // see https://github.com/borglab/gtsam/issues/746 for details - magnitude = 0.5 - tr_3 / 12.0; + magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0; } omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12); } diff --git a/gtsam/geometry/Similarity3.cpp b/gtsam/geometry/Similarity3.cpp index fcaf0c874..ea7a6d049 100644 --- a/gtsam/geometry/Similarity3.cpp +++ b/gtsam/geometry/Similarity3.cpp @@ -40,8 +40,8 @@ static Point3Pairs subtractCentroids(const Point3Pairs &abPointPairs, } /// Form inner products x and y and calculate scale. 
-static const double calculateScale(const Point3Pairs &d_abPointPairs, - const Rot3 &aRb) { +static double calculateScale(const Point3Pairs &d_abPointPairs, + const Rot3 &aRb) { double x = 0, y = 0; Point3 da, db; for (const Point3Pair& d_abPair : d_abPointPairs) { diff --git a/gtsam/geometry/SphericalCamera.cpp b/gtsam/geometry/SphericalCamera.cpp new file mode 100644 index 000000000..58a29dc09 --- /dev/null +++ b/gtsam/geometry/SphericalCamera.cpp @@ -0,0 +1,109 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file SphericalCamera.h + * @brief Calibrated camera with spherical projection + * @date Aug 26, 2021 + * @author Luca Carlone + */ + +#include + +using namespace std; + +namespace gtsam { + +/* ************************************************************************* */ +bool SphericalCamera::equals(const SphericalCamera& camera, double tol) const { + return pose_.equals(camera.pose(), tol); +} + +/* ************************************************************************* */ +void SphericalCamera::print(const string& s) const { pose_.print(s + ".pose"); } + +/* ************************************************************************* */ +pair SphericalCamera::projectSafe(const Point3& pw) const { + const Point3 pc = pose().transformTo(pw); + Unit3 pu = Unit3::FromPoint3(pc); + return make_pair(pu, pc.norm() > 1e-8); +} + +/* ************************************************************************* */ +Unit3 SphericalCamera::project2(const Point3& pw, OptionalJacobian<2, 6> Dpose, + OptionalJacobian<2, 3> Dpoint) const { + Matrix36 Dtf_pose; + Matrix3 Dtf_point; // calculated by transformTo if needed + const Point3 pc = + pose().transformTo(pw, Dpose ? &Dtf_pose : 0, Dpoint ? &Dtf_point : 0); + + if (pc.norm() <= 1e-8) throw("point cannot be at the center of the camera"); + + Matrix23 Dunit; // calculated by FromPoint3 if needed + Unit3 pu = Unit3::FromPoint3(Point3(pc), Dpoint ? &Dunit : 0); + + if (Dpose) *Dpose = Dunit * Dtf_pose; // 2x3 * 3x6 = 2x6 + if (Dpoint) *Dpoint = Dunit * Dtf_point; // 2x3 * 3x3 = 2x3 + return pu; +} + +/* ************************************************************************* */ +Unit3 SphericalCamera::project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose, + OptionalJacobian<2, 2> Dpoint) const { + Matrix23 Dtf_rot; + Matrix2 Dtf_point; // calculated by transformTo if needed + const Unit3 pu = pose().rotation().unrotate(pwu, Dpose ? &Dtf_rot : 0, + Dpoint ? 
&Dtf_point : 0); + + if (Dpose) + *Dpose << Dtf_rot, Matrix::Zero(2, 3); // 2x6 (translation part is zero) + if (Dpoint) *Dpoint = Dtf_point; // 2x2 + return pu; +} + +/* ************************************************************************* */ +Point3 SphericalCamera::backproject(const Unit3& pu, const double depth) const { + return pose().transformFrom(depth * pu); +} + +/* ************************************************************************* */ +Unit3 SphericalCamera::backprojectPointAtInfinity(const Unit3& p) const { + return pose().rotation().rotate(p); +} + +/* ************************************************************************* */ +Unit3 SphericalCamera::project(const Point3& point, + OptionalJacobian<2, 6> Dcamera, + OptionalJacobian<2, 3> Dpoint) const { + return project2(point, Dcamera, Dpoint); +} + +/* ************************************************************************* */ +Vector2 SphericalCamera::reprojectionError( + const Point3& point, const Unit3& measured, OptionalJacobian<2, 6> Dpose, + OptionalJacobian<2, 3> Dpoint) const { + // project point + if (Dpose || Dpoint) { + Matrix26 H_project_pose; + Matrix23 H_project_point; + Matrix22 H_error; + Unit3 projected = project2(point, H_project_pose, H_project_point); + Vector2 error = measured.errorVector(projected, boost::none, H_error); + if (Dpose) *Dpose = H_error * H_project_pose; + if (Dpoint) *Dpoint = H_error * H_project_point; + return error; + } else { + return measured.errorVector(project2(point, Dpose, Dpoint)); + } +} + +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam/geometry/SphericalCamera.h b/gtsam/geometry/SphericalCamera.h new file mode 100644 index 000000000..59658f3ce --- /dev/null +++ b/gtsam/geometry/SphericalCamera.h @@ -0,0 +1,225 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file SphericalCamera.h + * @brief Calibrated camera with spherical projection + * @date Aug 26, 2021 + * @author Luca Carlone + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * Empty calibration. Only needed to play well with other cameras + * (e.g., when templating functions wrt cameras), since other cameras + * have constuctors in the form ‘camera(pose,calibration)’ + * @addtogroup geometry + * \nosubgrouping + */ +class GTSAM_EXPORT EmptyCal { + public: + enum { dimension = 0 }; + EmptyCal() {} + virtual ~EmptyCal() = default; + using shared_ptr = boost::shared_ptr; + void print(const std::string& s) const { + std::cout << "empty calibration: " << s << std::endl; + } +}; + +/** + * A spherical camera class that has a Pose3 and measures bearing vectors. 
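 * Measurements are Unit3 bearing directions, so reprojection errors live in
 * the 2-dimensional tangent space of Unit3 (see reprojectionError below).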
+ * The camera has an ‘Empty’ calibration and the only 6 dof are the pose + * @addtogroup geometry + * \nosubgrouping + */ +class GTSAM_EXPORT SphericalCamera { + public: + enum { dimension = 6 }; + + typedef Unit3 Measurement; + typedef std::vector MeasurementVector; + typedef EmptyCal CalibrationType; + + private: + Pose3 pose_; ///< 3D pose of camera + + protected: + EmptyCal::shared_ptr emptyCal_; + + public: + /// @} + /// @name Standard Constructors + /// @{ + + /// Default constructor + SphericalCamera() + : pose_(Pose3::identity()), emptyCal_(boost::make_shared()) {} + + /// Constructor with pose + explicit SphericalCamera(const Pose3& pose) + : pose_(pose), emptyCal_(boost::make_shared()) {} + + /// Constructor with empty intrinsics (needed for smart factors) + explicit SphericalCamera(const Pose3& pose, + const boost::shared_ptr& cal) + : pose_(pose), emptyCal_(boost::make_shared()) {} + + /// @} + /// @name Advanced Constructors + /// @{ + explicit SphericalCamera(const Vector& v) : pose_(Pose3::Expmap(v)) {} + + /// Default destructor + virtual ~SphericalCamera() = default; + + /// return shared pointer to calibration + const boost::shared_ptr& sharedCalibration() const { + return emptyCal_; + } + + /// return calibration + const EmptyCal& calibration() const { return *emptyCal_; } + + /// @} + /// @name Testable + /// @{ + + /// assert equality up to a tolerance + bool equals(const SphericalCamera& camera, double tol = 1e-9) const; + + /// print + virtual void print(const std::string& s = "SphericalCamera") const; + + /// @} + /// @name Standard Interface + /// @{ + + /// return pose, constant version + const Pose3& pose() const { return pose_; } + + /// get rotation + const Rot3& rotation() const { return pose_.rotation(); } + + /// get translation + const Point3& translation() const { return pose_.translation(); } + + // /// return pose, with derivative + // const Pose3& getPose(OptionalJacobian<6, 6> H) const; + + /// @} + /// @name Transformations and measurement functions + /// @{ + + /// Project a point into the image and check depth + std::pair projectSafe(const Point3& pw) const; + + /** Project point into the image + * (note: there is no CheiralityException for a spherical camera) + * @param point 3D point in world coordinates + * @return the intrinsic coordinates of the projected point + */ + Unit3 project2(const Point3& pw, OptionalJacobian<2, 6> Dpose = boost::none, + OptionalJacobian<2, 3> Dpoint = boost::none) const; + + /** Project point into the image + * (note: there is no CheiralityException for a spherical camera) + * @param point 3D direction in world coordinates + * @return the intrinsic coordinates of the projected point + */ + Unit3 project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose = boost::none, + OptionalJacobian<2, 2> Dpoint = boost::none) const; + + /// backproject a 2-dimensional point to a 3-dimensional point at given depth + Point3 backproject(const Unit3& p, const double depth) const; + + /// backproject point at infinity + Unit3 backprojectPointAtInfinity(const Unit3& p) const; + + /** Project point into the image + * (note: there is no CheiralityException for a spherical camera) + * @param point 3D point in world coordinates + * @return the intrinsic coordinates of the projected point + */ + Unit3 project(const Point3& point, OptionalJacobian<2, 6> Dpose = boost::none, + OptionalJacobian<2, 3> Dpoint = boost::none) const; + + /** Compute reprojection error for a given 3D point in world coordinates + * @param point 3D point in world 
coordinates + * @return the tangent space error between the projection and the measurement + */ + Vector2 reprojectionError(const Point3& point, const Unit3& measured, + OptionalJacobian<2, 6> Dpose = boost::none, + OptionalJacobian<2, 3> Dpoint = boost::none) const; + /// @} + + /// move a cameras according to d + SphericalCamera retract(const Vector6& d) const { + return SphericalCamera(pose().retract(d)); + } + + /// return canonical coordinate + Vector6 localCoordinates(const SphericalCamera& p) const { + return pose().localCoordinates(p.pose()); + } + + /// for Canonical + static SphericalCamera identity() { + return SphericalCamera( + Pose3::identity()); // assumes that the default constructor is valid + } + + /// for Linear Triangulation + Matrix34 cameraProjectionMatrix() const { + return Matrix34(pose_.inverse().matrix().block(0, 0, 3, 4)); + } + + /// for Nonlinear Triangulation + Vector defaultErrorWhenTriangulatingBehindCamera() const { + return Eigen::Matrix::dimension, 1>::Constant(0.0); + } + + /// @deprecated + size_t dim() const { return 6; } + + /// @deprecated + static size_t Dim() { return 6; } + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_NVP(pose_); + } +}; +// end of class SphericalCamera + +template <> +struct traits : public internal::LieGroup {}; + +template <> +struct traits : public internal::LieGroup {}; + +} // namespace gtsam diff --git a/gtsam/geometry/StereoCamera.h b/gtsam/geometry/StereoCamera.h index 3b5bdaefc..c53fc11c9 100644 --- a/gtsam/geometry/StereoCamera.h +++ b/gtsam/geometry/StereoCamera.h @@ -170,6 +170,11 @@ public: OptionalJacobian<3, 3> H2 = boost::none, OptionalJacobian<3, 0> H3 = boost::none) const; + /// for Nonlinear Triangulation + Vector defaultErrorWhenTriangulatingBehindCamera() const { + return Eigen::Matrix::dimension,1>::Constant(2.0 * K_->fx());; + } + /// @} private: diff --git a/gtsam/geometry/concepts.h b/gtsam/geometry/concepts.h index 207b48f56..bafb62418 100644 --- a/gtsam/geometry/concepts.h +++ b/gtsam/geometry/concepts.h @@ -72,5 +72,5 @@ private: /** Pose Concept macros */ #define GTSAM_CONCEPT_POSE_INST(T) template class gtsam::PoseConcept; -#define GTSAM_CONCEPT_POSE_TYPE(T) typedef gtsam::PoseConcept _gtsam_PoseConcept##T; +#define GTSAM_CONCEPT_POSE_TYPE(T) using _gtsam_PoseConcept##T = gtsam::PoseConcept; diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 9baa49e8e..a40951d3e 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -473,6 +473,9 @@ class Pose3 { Vector logmap(const gtsam::Pose3& pose); Matrix AdjointMap() const; Vector Adjoint(Vector xi) const; + Vector AdjointTranspose(Vector xi) const; + static Matrix adjointMap(Vector xi); + static Vector adjoint(Vector xi, Vector y); static Matrix adjointMap_(Vector xi); static Vector adjoint_(Vector xi, Vector y); static Vector adjointTranspose(Vector xi, Vector y); diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 7c1fa81e6..e1d3d5ea2 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full) EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6)); } +/* ************************************************************************* */ +// Check Adjoint numerical derivatives +TEST(Pose3, Adjoint_jacobians) +{ + Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 
4.5).finished(); + + // Check evaluation sanity check + EQUALITY(static_cast(T.AdjointMap() * xi), T.Adjoint(xi)); + EQUALITY(static_cast(T2.AdjointMap() * xi), T2.Adjoint(xi)); + EQUALITY(static_cast(T3.AdjointMap() * xi), T3.Adjoint(xi)); + + // Check jacobians + Matrix6 actualH1, actualH2, expectedH1, expectedH2; + std::function Adjoint_proxy = + [&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); }; + + T.Adjoint(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi); + expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi); + EXPECT(assert_equal(expectedH1, actualH1)); + EXPECT(assert_equal(expectedH2, actualH2)); + + T2.Adjoint(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi); + expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi); + EXPECT(assert_equal(expectedH1, actualH1)); + EXPECT(assert_equal(expectedH2, actualH2)); + + T3.Adjoint(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi); + expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi); + EXPECT(assert_equal(expectedH1, actualH1)); + EXPECT(assert_equal(expectedH2, actualH2)); +} + +/* ************************************************************************* */ +// Check AdjointTranspose and jacobians +TEST(Pose3, AdjointTranspose) +{ + Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished(); + + // Check evaluation + EQUALITY(static_cast(T.AdjointMap().transpose() * xi), + T.AdjointTranspose(xi)); + EQUALITY(static_cast(T2.AdjointMap().transpose() * xi), + T2.AdjointTranspose(xi)); + EQUALITY(static_cast(T3.AdjointMap().transpose() * xi), + T3.AdjointTranspose(xi)); + + // Check jacobians + Matrix6 actualH1, actualH2, expectedH1, expectedH2; + std::function AdjointTranspose_proxy = + [&](const Pose3& T, const Vector6& xi) { + return T.AdjointTranspose(xi); + }; + + T.AdjointTranspose(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi); + expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi); + EXPECT(assert_equal(expectedH1, actualH1, 1e-8)); + EXPECT(assert_equal(expectedH2, actualH2)); + + T2.AdjointTranspose(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi); + expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi); + EXPECT(assert_equal(expectedH1, actualH1, 1e-8)); + EXPECT(assert_equal(expectedH2, actualH2)); + + T3.AdjointTranspose(xi, actualH1, actualH2); + expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi); + expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi); + EXPECT(assert_equal(expectedH1, actualH1, 1e-8)); + EXPECT(assert_equal(expectedH2, actualH2)); +} + /* ************************************************************************* */ // assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi)) TEST(Pose3, Adjoint_hat) @@ -837,16 +912,20 @@ Vector6 testDerivAdjoint(const Vector6& xi, const Vector6& v) { } TEST( Pose3, adjoint) { - Vector expected = testDerivAdjoint(screwPose3::xi, screwPose3::xi); + Vector6 v = (Vector6() << 1, 2, 3, 4, 5, 6).finished(); + Vector expected = testDerivAdjoint(screwPose3::xi, v); - Matrix actualH; - Vector actual = Pose3::adjoint(screwPose3::xi, screwPose3::xi, actualH); + Matrix actualH1, actualH2; + Vector actual = Pose3::adjoint(screwPose3::xi, v, actualH1, actualH2); - Matrix numericalH = numericalDerivative21( - testDerivAdjoint, screwPose3::xi, screwPose3::xi, 1e-5); + Matrix numericalH1 = 
numericalDerivative21( + testDerivAdjoint, screwPose3::xi, v, 1e-5); + Matrix numericalH2 = numericalDerivative22( + testDerivAdjoint, screwPose3::xi, v, 1e-5); EXPECT(assert_equal(expected,actual,1e-5)); - EXPECT(assert_equal(numericalH,actualH,1e-5)); + EXPECT(assert_equal(numericalH1,actualH1,1e-5)); + EXPECT(assert_equal(numericalH2,actualH2,1e-5)); } /* ************************************************************************* */ @@ -859,14 +938,17 @@ TEST( Pose3, adjointTranspose) { Vector v = (Vector(6) << 0.04, 0.05, 0.06, 4.0, 5.0, 6.0).finished(); Vector expected = testDerivAdjointTranspose(xi, v); - Matrix actualH; - Vector actual = Pose3::adjointTranspose(xi, v, actualH); + Matrix actualH1, actualH2; + Vector actual = Pose3::adjointTranspose(xi, v, actualH1, actualH2); - Matrix numericalH = numericalDerivative21( + Matrix numericalH1 = numericalDerivative21( + testDerivAdjointTranspose, xi, v, 1e-5); + Matrix numericalH2 = numericalDerivative22( testDerivAdjointTranspose, xi, v, 1e-5); EXPECT(assert_equal(expected,actual,1e-15)); - EXPECT(assert_equal(numericalH,actualH,1e-5)); + EXPECT(assert_equal(numericalH1,actualH1,1e-5)); + EXPECT(assert_equal(numericalH2,actualH2,1e-5)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index 34f90c8cc..dc4b888b3 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -122,6 +122,21 @@ TEST( Rot3, AxisAngle) CHECK(assert_equal(expected,actual3,1e-5)); } +/* ************************************************************************* */ +TEST( Rot3, AxisAngle2) +{ + // constructor from a rotation matrix, as doubles in *row-major* order. + Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781); + + Unit3 actualAxis; + double actualAngle; + // convert Rot3 to quaternion using GTSAM + std::tie(actualAxis, actualAngle) = R1.axisAngle(); + + double expectedAngle = 3.1396582; + CHECK(assert_equal(expectedAngle, actualAngle, 1e-5)); +} + /* ************************************************************************* */ TEST( Rot3, Rodrigues) { @@ -181,13 +196,13 @@ TEST( Rot3, retract) } /* ************************************************************************* */ -TEST(Rot3, log) { +TEST( Rot3, log) { static const double PI = boost::math::constants::pi(); Vector w; Rot3 R; #define CHECK_OMEGA(X, Y, Z) \ - w = (Vector(3) << X, Y, Z).finished(); \ + w = (Vector(3) << (X), (Y), (Z)).finished(); \ R = Rot3::Rodrigues(w); \ EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12)); @@ -219,17 +234,17 @@ TEST(Rot3, log) { CHECK_OMEGA(0, 0, PI) // Windows and Linux have flipped sign in quaternion mode -#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS) +//#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS) w = (Vector(3) << x * PI, y * PI, z * PI).finished(); R = Rot3::Rodrigues(w); EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12)); -#else - CHECK_OMEGA(x * PI, y * PI, z * PI) -#endif +//#else +// CHECK_OMEGA(x * PI, y * PI, z * PI) +//#endif // Check 360 degree rotations #define CHECK_OMEGA_ZERO(X, Y, Z) \ - w = (Vector(3) << X, Y, Z).finished(); \ + w = (Vector(3) << (X), (Y), (Z)).finished(); \ R = Rot3::Rodrigues(w); \ EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R))); @@ -247,15 +262,15 @@ TEST(Rot3, log) { // Rot3's Logmap returns different, but equivalent compacted // axis-angle vectors depending on whether Rot3 is implemented // by 
Quaternions or SO3. - #if defined(GTSAM_USE_QUATERNIONS) - // Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees - EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211), +#if defined(GTSAM_USE_QUATERNIONS) + // Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees + EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211), + (Vector)Rot3::Logmap(Rlund), 1e-8)); +#else + // SO3 will be approximate because of the non-orthogonality + EXPECT(assert_equal(Vector3(0.264452, -0.742197708, -3.04098184), (Vector)Rot3::Logmap(Rlund), 1e-8)); - #else - // SO3 does not bound angle resulting in ~180.1 degrees - EXPECT(assert_equal(Vector3(-0.264544406, 0.742217405, 3.04117314), - (Vector)Rot3::Logmap(Rlund), 1e-8)); - #endif +#endif } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testSphericalCamera.cpp b/gtsam/geometry/tests/testSphericalCamera.cpp new file mode 100644 index 000000000..4bc851f35 --- /dev/null +++ b/gtsam/geometry/tests/testSphericalCamera.cpp @@ -0,0 +1,163 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file SphericalCamera.h + * @brief Calibrated camera with spherical projection + * @date Aug 26, 2021 + * @author Luca Carlone + */ + +#include +#include +#include +#include + +#include +#include + +using namespace std::placeholders; +using namespace std; +using namespace gtsam; + +typedef SphericalCamera Camera; + +// static const Cal3_S2 K(625, 625, 0, 0, 0); +// +static const Pose3 pose(Rot3(Vector3(1, -1, -1).asDiagonal()), + Point3(0, 0, 0.5)); +static const Camera camera(pose); +// +static const Pose3 pose1(Rot3(), Point3(0, 1, 0.5)); +static const Camera camera1(pose1); + +static const Point3 point1(-0.08, -0.08, 0.0); +static const Point3 point2(-0.08, 0.08, 0.0); +static const Point3 point3(0.08, 0.08, 0.0); +static const Point3 point4(0.08, -0.08, 0.0); + +// manually computed in matlab +static const Unit3 bearing1(-0.156054862928174, 0.156054862928174, + 0.975342893301088); +static const Unit3 bearing2(-0.156054862928174, -0.156054862928174, + 0.975342893301088); +static const Unit3 bearing3(0.156054862928174, -0.156054862928174, + 0.975342893301088); +static const Unit3 bearing4(0.156054862928174, 0.156054862928174, + 0.975342893301088); + +static double depth = 0.512640224719052; +/* ************************************************************************* */ +TEST(SphericalCamera, constructor) { + EXPECT(assert_equal(pose, camera.pose())); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, project) { + // expected from manual calculation in Matlab + EXPECT(assert_equal(camera.project(point1), bearing1)); + EXPECT(assert_equal(camera.project(point2), bearing2)); + EXPECT(assert_equal(camera.project(point3), bearing3)); + EXPECT(assert_equal(camera.project(point4), bearing4)); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, backproject) { + EXPECT(assert_equal(camera.backproject(bearing1, depth), point1)); + EXPECT(assert_equal(camera.backproject(bearing2, depth), point2)); + 
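  // Sanity of the constants above: for point1 the camera-frame point is
  // pc = pose.transformTo(point1) = (-0.08, 0.08, 0.5), so depth = |pc| ~= 0.51264
  // and bearing1 = pc / |pc|; backproject(bearing1, depth) then applies
  // pose.transformFrom(depth * bearing1) and recovers point1 exactly.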
EXPECT(assert_equal(camera.backproject(bearing3, depth), point3)); + EXPECT(assert_equal(camera.backproject(bearing4, depth), point4)); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, backproject2) { + Point3 origin(0, 0, 0); + Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.); // a camera1 looking down + Camera camera(Pose3(rot, origin)); + + Point3 actual = camera.backproject(Unit3(0, 0, 1), 1.); + Point3 expected(0., 1., 0.); + pair x = camera.projectSafe(expected); + + EXPECT(assert_equal(expected, actual)); + EXPECT(assert_equal(Unit3(0, 0, 1), x.first)); + EXPECT(x.second); +} + +/* ************************************************************************* */ +static Unit3 project3(const Pose3& pose, const Point3& point) { + return Camera(pose).project(point); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, Dproject) { + Matrix Dpose, Dpoint; + Unit3 result = camera.project(point1, Dpose, Dpoint); + Matrix numerical_pose = numericalDerivative21(project3, pose, point1); + Matrix numerical_point = numericalDerivative22(project3, pose, point1); + EXPECT(assert_equal(bearing1, result)); + EXPECT(assert_equal(numerical_pose, Dpose, 1e-7)); + EXPECT(assert_equal(numerical_point, Dpoint, 1e-7)); +} + +/* ************************************************************************* */ +static Vector2 reprojectionError2(const Pose3& pose, const Point3& point, + const Unit3& measured) { + return Camera(pose).reprojectionError(point, measured); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, reprojectionError) { + Matrix Dpose, Dpoint; + Vector2 result = camera.reprojectionError(point1, bearing1, Dpose, Dpoint); + Matrix numerical_pose = + numericalDerivative31(reprojectionError2, pose, point1, bearing1); + Matrix numerical_point = + numericalDerivative32(reprojectionError2, pose, point1, bearing1); + EXPECT(assert_equal(Vector2(0.0, 0.0), result)); + EXPECT(assert_equal(numerical_pose, Dpose, 1e-7)); + EXPECT(assert_equal(numerical_point, Dpoint, 1e-7)); +} + +/* ************************************************************************* */ +TEST(SphericalCamera, reprojectionError_noisy) { + Matrix Dpose, Dpoint; + Unit3 bearing_noisy = bearing1.retract(Vector2(0.01, 0.05)); + Vector2 result = + camera.reprojectionError(point1, bearing_noisy, Dpose, Dpoint); + Matrix numerical_pose = + numericalDerivative31(reprojectionError2, pose, point1, bearing_noisy); + Matrix numerical_point = + numericalDerivative32(reprojectionError2, pose, point1, bearing_noisy); + EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e-5)); + EXPECT(assert_equal(numerical_pose, Dpose, 1e-7)); + EXPECT(assert_equal(numerical_point, Dpoint, 1e-7)); +} + +/* ************************************************************************* */ +// Add a test with more arbitrary rotation +TEST(SphericalCamera, Dproject2) { + static const Pose3 pose1(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10)); + static const Camera camera(pose1); + Matrix Dpose, Dpoint; + camera.project2(point1, Dpose, Dpoint); + Matrix numerical_pose = numericalDerivative21(project3, pose1, point1); + Matrix numerical_point = numericalDerivative22(project3, pose1, point1); + CHECK(assert_equal(numerical_pose, Dpose, 1e-7)); + CHECK(assert_equal(numerical_point, Dpoint, 1e-7)); +} + +/* ************************************************************************* */ +int main() { + 
TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testTriangulation.cpp b/gtsam/geometry/tests/testTriangulation.cpp index 4f71a48da..5fdb911d0 100644 --- a/gtsam/geometry/tests/testTriangulation.cpp +++ b/gtsam/geometry/tests/testTriangulation.cpp @@ -10,22 +10,23 @@ * -------------------------------------------------------------------------- */ /** - * testTriangulation.cpp - * - * Created on: July 30th, 2013 - * Author: cbeall3 + * @file testTriangulation.cpp + * @brief triangulation utilities + * @date July 30th, 2013 + * @author Chris Beall (cbeall3) + * @author Luca Carlone */ -#include -#include -#include -#include -#include -#include -#include -#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -36,7 +37,7 @@ using namespace boost::assign; // Some common constants -static const boost::shared_ptr sharedCal = // +static const boost::shared_ptr sharedCal = // boost::make_shared(1500, 1200, 0, 640, 480); // Looking along X-axis, 1 meter above ground plane (x-y) @@ -57,8 +58,7 @@ Point2 z2 = camera2.project(landmark); //****************************************************************************** // Simple test with a well-behaved two camera situation -TEST( triangulation, twoPoses) { - +TEST(triangulation, twoPoses) { vector poses; Point2Vector measurements; @@ -69,36 +69,36 @@ TEST( triangulation, twoPoses) { // 1. Test simple DLT, perfect in no noise situation bool optimize = false; - boost::optional actual1 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + boost::optional actual1 = // + triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(landmark, *actual1, 1e-7)); // 2. test with optimization on, same answer optimize = true; - boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + boost::optional actual2 = // + triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(landmark, *actual2, 1e-7)); - // 3. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + // 3. Add some noise and try again: result should be ~ (4.995, + // 0.499167, 1.19814) measurements.at(0) += Point2(0.1, 0.5); measurements.at(1) += Point2(-0.2, 0.3); optimize = false; - boost::optional actual3 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + boost::optional actual3 = // + triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4)); // 4. 
Now with optimization on optimize = true; - boost::optional actual4 = // - triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); + boost::optional actual4 = // + triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4)); } //****************************************************************************** // Similar, but now with Bundler calibration -TEST( triangulation, twoPosesBundler) { - - boost::shared_ptr bundlerCal = // +TEST(triangulation, twoPosesBundler) { + boost::shared_ptr bundlerCal = // boost::make_shared(1500, 0, 0, 640, 480); PinholeCamera camera1(pose1, *bundlerCal); PinholeCamera camera2(pose2, *bundlerCal); @@ -116,37 +116,38 @@ TEST( triangulation, twoPosesBundler) { bool optimize = true; double rank_tol = 1e-9; - boost::optional actual = // - triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize); + boost::optional actual = // + triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize); EXPECT(assert_equal(landmark, *actual, 1e-7)); // Add some noise and try again measurements.at(0) += Point2(0.1, 0.5); measurements.at(1) += Point2(-0.2, 0.3); - boost::optional actual2 = // - triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize); + boost::optional actual2 = // + triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize); EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-4)); } //****************************************************************************** -TEST( triangulation, fourPoses) { +TEST(triangulation, fourPoses) { vector poses; Point2Vector measurements; poses += pose1, pose2; measurements += z1, z2; - boost::optional actual = triangulatePoint3(poses, sharedCal, - measurements); + boost::optional actual = + triangulatePoint3(poses, sharedCal, measurements); EXPECT(assert_equal(landmark, *actual, 1e-2)); - // 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + // 2. Add some noise and try again: result should be ~ (4.995, + // 0.499167, 1.19814) measurements.at(0) += Point2(0.1, 0.5); measurements.at(1) += Point2(-0.2, 0.3); - boost::optional actual2 = // - triangulatePoint3(poses, sharedCal, measurements); + boost::optional actual2 = // + triangulatePoint3(poses, sharedCal, measurements); EXPECT(assert_equal(landmark, *actual2, 1e-2)); // 3. Add a slightly rotated third camera above, again with measurement noise @@ -157,13 +158,13 @@ TEST( triangulation, fourPoses) { poses += pose3; measurements += z3 + Point2(0.1, -0.1); - boost::optional triangulated_3cameras = // - triangulatePoint3(poses, sharedCal, measurements); + boost::optional triangulated_3cameras = // + triangulatePoint3(poses, sharedCal, measurements); EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization - boost::optional triangulated_3cameras_opt = triangulatePoint3(poses, - sharedCal, measurements, 1e-9, true); + boost::optional triangulated_3cameras_opt = + triangulatePoint3(poses, sharedCal, measurements, 1e-9, true); EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); // 4. 
Test failure: Add a 4th camera facing the wrong way @@ -176,13 +177,13 @@ TEST( triangulation, fourPoses) { poses += pose4; measurements += Point2(400, 400); - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), - TriangulationCheiralityException); + CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + TriangulationCheiralityException); #endif } //****************************************************************************** -TEST( triangulation, fourPoses_distinct_Ks) { +TEST(triangulation, fourPoses_distinct_Ks) { Cal3_S2 K1(1500, 1200, 0, 640, 480); // create first camera. Looking along X-axis, 1 meter above ground plane (x-y) PinholeCamera camera1(pose1, K1); @@ -195,22 +196,23 @@ TEST( triangulation, fourPoses_distinct_Ks) { Point2 z1 = camera1.project(landmark); Point2 z2 = camera2.project(landmark); - CameraSet > cameras; + CameraSet> cameras; Point2Vector measurements; cameras += camera1, camera2; measurements += z1, z2; - boost::optional actual = // - triangulatePoint3(cameras, measurements); + boost::optional actual = // + triangulatePoint3(cameras, measurements); EXPECT(assert_equal(landmark, *actual, 1e-2)); - // 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + // 2. Add some noise and try again: result should be ~ (4.995, + // 0.499167, 1.19814) measurements.at(0) += Point2(0.1, 0.5); measurements.at(1) += Point2(-0.2, 0.3); - boost::optional actual2 = // - triangulatePoint3(cameras, measurements); + boost::optional actual2 = // + triangulatePoint3(cameras, measurements); EXPECT(assert_equal(landmark, *actual2, 1e-2)); // 3. Add a slightly rotated third camera above, again with measurement noise @@ -222,13 +224,13 @@ TEST( triangulation, fourPoses_distinct_Ks) { cameras += camera3; measurements += z3 + Point2(0.1, -0.1); - boost::optional triangulated_3cameras = // - triangulatePoint3(cameras, measurements); + boost::optional triangulated_3cameras = // + triangulatePoint3(cameras, measurements); EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2)); // Again with nonlinear optimization - boost::optional triangulated_3cameras_opt = triangulatePoint3(cameras, - measurements, 1e-9, true); + boost::optional triangulated_3cameras_opt = + triangulatePoint3(cameras, measurements, 1e-9, true); EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2)); // 4. Test failure: Add a 4th camera facing the wrong way @@ -241,13 +243,13 @@ TEST( triangulation, fourPoses_distinct_Ks) { cameras += camera4; measurements += Point2(400, 400); - CHECK_EXCEPTION(triangulatePoint3(cameras, measurements), - TriangulationCheiralityException); + CHECK_EXCEPTION(triangulatePoint3(cameras, measurements), + TriangulationCheiralityException); #endif } //****************************************************************************** -TEST( triangulation, outliersAndFarLandmarks) { +TEST(triangulation, outliersAndFarLandmarks) { Cal3_S2 K1(1500, 1200, 0, 640, 480); // create first camera. 
Looking along X-axis, 1 meter above ground plane (x-y) PinholeCamera camera1(pose1, K1); @@ -260,24 +262,29 @@ TEST( triangulation, outliersAndFarLandmarks) { Point2 z1 = camera1.project(landmark); Point2 z2 = camera2.project(landmark); - CameraSet > cameras; + CameraSet> cameras; Point2Vector measurements; cameras += camera1, camera2; measurements += z1, z2; - double landmarkDistanceThreshold = 10; // landmark is closer than that - TriangulationParameters params(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold - TriangulationResult actual = triangulateSafe(cameras,measurements,params); + double landmarkDistanceThreshold = 10; // landmark is closer than that + TriangulationParameters params( + 1.0, false, landmarkDistanceThreshold); // all default except + // landmarkDistanceThreshold + TriangulationResult actual = triangulateSafe(cameras, measurements, params); EXPECT(assert_equal(landmark, *actual, 1e-2)); EXPECT(actual.valid()); - landmarkDistanceThreshold = 4; // landmark is farther than that - TriangulationParameters params2(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold - actual = triangulateSafe(cameras,measurements,params2); + landmarkDistanceThreshold = 4; // landmark is farther than that + TriangulationParameters params2( + 1.0, false, landmarkDistanceThreshold); // all default except + // landmarkDistanceThreshold + actual = triangulateSafe(cameras, measurements, params2); EXPECT(actual.farPoint()); - // 3. Add a slightly rotated third camera above with a wrong measurement (OUTLIER) + // 3. Add a slightly rotated third camera above with a wrong measurement + // (OUTLIER) Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1)); Cal3_S2 K3(700, 500, 0, 640, 480); PinholeCamera camera3(pose3, K3); @@ -286,21 +293,23 @@ TEST( triangulation, outliersAndFarLandmarks) { cameras += camera3; measurements += z3 + Point2(10, -10); - landmarkDistanceThreshold = 10; // landmark is closer than that - double outlierThreshold = 100; // loose, the outlier is going to pass - TriangulationParameters params3(1.0, false, landmarkDistanceThreshold,outlierThreshold); - actual = triangulateSafe(cameras,measurements,params3); + landmarkDistanceThreshold = 10; // landmark is closer than that + double outlierThreshold = 100; // loose, the outlier is going to pass + TriangulationParameters params3(1.0, false, landmarkDistanceThreshold, + outlierThreshold); + actual = triangulateSafe(cameras, measurements, params3); EXPECT(actual.valid()); // now set stricter threshold for outlier rejection - outlierThreshold = 5; // tighter, the outlier is not going to pass - TriangulationParameters params4(1.0, false, landmarkDistanceThreshold,outlierThreshold); - actual = triangulateSafe(cameras,measurements,params4); + outlierThreshold = 5; // tighter, the outlier is not going to pass + TriangulationParameters params4(1.0, false, landmarkDistanceThreshold, + outlierThreshold); + actual = triangulateSafe(cameras, measurements, params4); EXPECT(actual.outlier()); } //****************************************************************************** -TEST( triangulation, twoIdenticalPoses) { +TEST(triangulation, twoIdenticalPoses) { // create first camera. 
Looking along X-axis, 1 meter above ground plane (x-y) PinholeCamera camera1(pose1, *sharedCal); @@ -313,12 +322,12 @@ TEST( triangulation, twoIdenticalPoses) { poses += pose1, pose1; measurements += z1, z1; - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), - TriangulationUnderconstrainedException); + CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + TriangulationUnderconstrainedException); } //****************************************************************************** -TEST( triangulation, onePose) { +TEST(triangulation, onePose) { // we expect this test to fail with a TriangulationUnderconstrainedException // because there's only one camera observation @@ -326,28 +335,26 @@ TEST( triangulation, onePose) { Point2Vector measurements; poses += Pose3(); - measurements += Point2(0,0); + measurements += Point2(0, 0); - CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), - TriangulationUnderconstrainedException); + CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements), + TriangulationUnderconstrainedException); } //****************************************************************************** -TEST( triangulation, StereotriangulateNonlinear ) { - - auto stereoK = boost::make_shared(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612); +TEST(triangulation, StereotriangulateNonlinear) { + auto stereoK = boost::make_shared(1733.75, 1733.75, 0, 689.645, + 508.835, 0.0699612); // two camera poses m1, m2 Matrix4 m1, m2; - m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779, - 0.592783835, -0.77156583, 0.230856632, 66.2186159, - 0.116517574, -0.201470143, -0.9725393, -4.28382528, - 0, 0, 0, 1; + m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779, 0.592783835, + -0.77156583, 0.230856632, 66.2186159, 0.116517574, -0.201470143, + -0.9725393, -4.28382528, 0, 0, 0, 1; - m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799, - -0.29277519, 0.947083213, 0.131587097, 65.843136, - -0.0206094928, 0.131334858, -0.991123524, -4.3525033, - 0, 0, 0, 1; + m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799, -0.29277519, + 0.947083213, 0.131587097, 65.843136, -0.0206094928, 0.131334858, + -0.991123524, -4.3525033, 0, 0, 0, 1; typedef CameraSet Cameras; Cameras cameras; @@ -358,18 +365,19 @@ TEST( triangulation, StereotriangulateNonlinear ) { measurements += StereoPoint2(226.936, 175.212, 424.469); measurements += StereoPoint2(339.571, 285.547, 669.973); - Point3 initial = Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191 + Point3 initial = + Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191 - Point3 actual = triangulateNonlinear(cameras, measurements, initial); + Point3 actual = triangulateNonlinear(cameras, measurements, initial); - Point3 expected(46.0484569, 66.4710686, -6.55046613); // error: 0.763510644187 + Point3 expected(46.0484569, 66.4710686, + -6.55046613); // error: 0.763510644187 EXPECT(assert_equal(expected, actual, 1e-4)); - // regular stereo factor comparison - expect very similar result as above { - typedef GenericStereoFactor StereoFactor; + typedef GenericStereoFactor StereoFactor; Values values; values.insert(Symbol('x', 1), Pose3(m1)); @@ -378,17 +386,19 @@ TEST( triangulation, StereotriangulateNonlinear ) { NonlinearFactorGraph graph; static SharedNoiseModel unit(noiseModel::Unit::Create(3)); - graph.emplace_shared(measurements[0], unit, Symbol('x',1), Symbol('l',1), stereoK); - graph.emplace_shared(measurements[1], unit, Symbol('x',2), Symbol('l',1), stereoK); + 
graph.emplace_shared(measurements[0], unit, Symbol('x', 1), + Symbol('l', 1), stereoK); + graph.emplace_shared(measurements[1], unit, Symbol('x', 2), + Symbol('l', 1), stereoK); const SharedDiagonal posePrior = noiseModel::Isotropic::Sigma(6, 1e-9); - graph.addPrior(Symbol('x',1), Pose3(m1), posePrior); - graph.addPrior(Symbol('x',2), Pose3(m2), posePrior); + graph.addPrior(Symbol('x', 1), Pose3(m1), posePrior); + graph.addPrior(Symbol('x', 2), Pose3(m2), posePrior); LevenbergMarquardtOptimizer optimizer(graph, values); Values result = optimizer.optimize(); - EXPECT(assert_equal(expected, result.at(Symbol('l',1)), 1e-4)); + EXPECT(assert_equal(expected, result.at(Symbol('l', 1)), 1e-4)); } // use Triangulation Factor directly - expect same result as above @@ -399,13 +409,15 @@ TEST( triangulation, StereotriangulateNonlinear ) { NonlinearFactorGraph graph; static SharedNoiseModel unit(noiseModel::Unit::Create(3)); - graph.emplace_shared >(cameras[0], measurements[0], unit, Symbol('l',1)); - graph.emplace_shared >(cameras[1], measurements[1], unit, Symbol('l',1)); + graph.emplace_shared>( + cameras[0], measurements[0], unit, Symbol('l', 1)); + graph.emplace_shared>( + cameras[1], measurements[1], unit, Symbol('l', 1)); LevenbergMarquardtOptimizer optimizer(graph, values); Values result = optimizer.optimize(); - EXPECT(assert_equal(expected, result.at(Symbol('l',1)), 1e-4)); + EXPECT(assert_equal(expected, result.at(Symbol('l', 1)), 1e-4)); } // use ExpressionFactor - expect same result as above @@ -416,11 +428,13 @@ TEST( triangulation, StereotriangulateNonlinear ) { NonlinearFactorGraph graph; static SharedNoiseModel unit(noiseModel::Unit::Create(3)); - Expression point_(Symbol('l',1)); + Expression point_(Symbol('l', 1)); Expression camera0_(cameras[0]); Expression camera1_(cameras[1]); - Expression project0_(camera0_, &StereoCamera::project2, point_); - Expression project1_(camera1_, &StereoCamera::project2, point_); + Expression project0_(camera0_, &StereoCamera::project2, + point_); + Expression project1_(camera1_, &StereoCamera::project2, + point_); graph.addExpressionFactor(unit, measurements[0], project0_); graph.addExpressionFactor(unit, measurements[1], project1_); @@ -428,10 +442,172 @@ TEST( triangulation, StereotriangulateNonlinear ) { LevenbergMarquardtOptimizer optimizer(graph, values); Values result = optimizer.optimize(); - EXPECT(assert_equal(expected, result.at(Symbol('l',1)), 1e-4)); + EXPECT(assert_equal(expected, result.at(Symbol('l', 1)), 1e-4)); } } +//****************************************************************************** +// Simple test with a well-behaved two camera situation +TEST(triangulation, twoPoses_sphericalCamera) { + vector poses; + std::vector measurements; + + // Project landmark into two cameras and triangulate + SphericalCamera cam1(pose1); + SphericalCamera cam2(pose2); + Unit3 u1 = cam1.project(landmark); + Unit3 u2 = cam2.project(landmark); + + poses += pose1, pose2; + measurements += u1, u2; + + CameraSet cameras; + cameras.push_back(cam1); + cameras.push_back(cam2); + + double rank_tol = 1e-9; + + // 1. Test linear triangulation via DLT + auto projection_matrices = projectionMatricesFromCameras(cameras); + Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol); + EXPECT(assert_equal(landmark, point, 1e-7)); + + // 2. Test nonlinear triangulation + point = triangulateNonlinear(cameras, measurements, point); + EXPECT(assert_equal(landmark, point, 1e-7)); + + // 3. 
Test simple DLT, now within triangulatePoint3 + bool optimize = false; + boost::optional actual1 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(landmark, *actual1, 1e-7)); + + // 4. test with optimization on, same answer + optimize = true; + boost::optional actual2 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(landmark, *actual2, 1e-7)); + + // 5. Add some noise and try again: result should be ~ (4.995, + // 0.499167, 1.19814) + measurements.at(0) = + u1.retract(Vector2(0.01, 0.05)); // note: perturbation smaller for Unit3 + measurements.at(1) = u2.retract(Vector2(-0.02, 0.03)); + optimize = false; + boost::optional actual3 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(Point3(5.9432, 0.654319, 1.48192), *actual3, 1e-3)); + + // 6. Now with optimization on + optimize = true; + boost::optional actual4 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(Point3(5.9432, 0.654334, 1.48192), *actual4, 1e-3)); +} + +//****************************************************************************** +TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) { + vector poses; + std::vector measurements; + + // Project landmark into two cameras and triangulate + Pose3 poseA = Pose3( + Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame + Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(2.0, 0.0, 0.0)); // 2m in front of poseA + Point3 landmarkL( + 1.0, -1.0, + 0.0); // 1m to the right of both cameras, in front of poseA, behind poseB + SphericalCamera cam1(poseA); + SphericalCamera cam2(poseB); + Unit3 u1 = cam1.project(landmarkL); + Unit3 u2 = cam2.project(landmarkL); + + EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, 1.0)), u1, + 1e-7)); // in front and to the right of PoseA + EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2, + 1e-7)); // behind and to the right of PoseB + + poses += pose1, pose2; + measurements += u1, u2; + + CameraSet cameras; + cameras.push_back(cam1); + cameras.push_back(cam2); + + double rank_tol = 1e-9; + + { + // 1. Test simple DLT, when 1 point is behind spherical camera + bool optimize = false; +#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION + CHECK_EXCEPTION(triangulatePoint3(cameras, measurements, + rank_tol, optimize), + TriangulationCheiralityException); +#else // otherwise project should not throw the exception + boost::optional actual1 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(landmarkL, *actual1, 1e-7)); +#endif + } + { + // 2. 
test with optimization on, same answer + bool optimize = true; +#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION + CHECK_EXCEPTION(triangulatePoint3(cameras, measurements, + rank_tol, optimize), + TriangulationCheiralityException); +#else // otherwise project should not throw the exception + boost::optional actual1 = // + triangulatePoint3(cameras, measurements, rank_tol, + optimize); + EXPECT(assert_equal(landmarkL, *actual1, 1e-7)); +#endif + } +} + +//****************************************************************************** +TEST(triangulation, reprojectionError_cameraComparison) { + Pose3 poseA = Pose3( + Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame + Point3 landmarkL(5.0, 0.0, 0.0); // 5m in front of poseA + SphericalCamera sphericalCamera(poseA); + Unit3 u = sphericalCamera.project(landmarkL); + + static Cal3_S2::shared_ptr sharedK(new Cal3_S2(60, 640, 480)); + PinholePose pinholeCamera(poseA, sharedK); + Vector2 px = pinholeCamera.project(landmarkL); + + // add perturbation and compare error in both cameras + Vector2 px_noise(1.0, 2.0); // px perturbation vertically and horizontally + Vector2 measured_px = px + px_noise; + Vector2 measured_px_calibrated = sharedK->calibrate(measured_px); + Unit3 measured_u = + Unit3(measured_px_calibrated[0], measured_px_calibrated[1], 1.0); + Unit3 expected_measured_u = + Unit3(px_noise[0] / sharedK->fx(), px_noise[1] / sharedK->fy(), 1.0); + EXPECT(assert_equal(expected_measured_u, measured_u, 1e-7)); + + Vector2 actualErrorPinhole = + pinholeCamera.reprojectionError(landmarkL, measured_px); + Vector2 expectedErrorPinhole = Vector2(-px_noise[0], -px_noise[1]); + EXPECT(assert_equal(expectedErrorPinhole, actualErrorPinhole, + 1e-7)); //- sign due to definition of error + + Vector2 actualErrorSpherical = + sphericalCamera.reprojectionError(landmarkL, measured_u); + // expectedError: not easy to calculate, since it involves the unit3 basis + Vector2 expectedErrorSpherical(-0.00360842, 0.00180419); + EXPECT(assert_equal(expectedErrorSpherical, actualErrorSpherical, 1e-7)); +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp index a5d2e04cd..026afef24 100644 --- a/gtsam/geometry/triangulation.cpp +++ b/gtsam/geometry/triangulation.cpp @@ -53,15 +53,57 @@ Vector4 triangulateHomogeneousDLT( return v; } -Point3 triangulateDLT(const std::vector>& projection_matrices, +Vector4 triangulateHomogeneousDLT( + const std::vector>& projection_matrices, + const std::vector& measurements, double rank_tol) { + + // number of cameras + size_t m = projection_matrices.size(); + + // Allocate DLT matrix + Matrix A = Matrix::Zero(m * 2, 4); + + for (size_t i = 0; i < m; i++) { + size_t row = i * 2; + const Matrix34& projection = projection_matrices.at(i); + const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector + + // build system of equations + A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0); + A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1); + } + int rank; + double error; + Vector v; + boost::tie(rank, error, v) = DLT(A, rank_tol); + + if (rank < 3) + throw(TriangulationUnderconstrainedException()); + + return v; +} + +Point3 triangulateDLT( + const std::vector>& projection_matrices, const Point2Vector& measurements, double rank_tol) { - Vector4 v = 
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol); - + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, + rank_tol); // Create 3D point from homogeneous coordinates return Point3(v.head<3>() / v[3]); } +Point3 triangulateDLT( + const std::vector>& projection_matrices, + const std::vector& measurements, double rank_tol) { + + // contrary to previous triangulateDLT, this is now taking Unit3 inputs + Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, + rank_tol); + // Create 3D point from homogeneous coordinates + return Point3(v.head<3>() / v[3]); +} + /// /** * Optimize for triangulation @@ -71,7 +113,7 @@ Point3 triangulateDLT(const std::vector #include #include +#include #include #include #include @@ -59,6 +60,18 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( const std::vector>& projection_matrices, const Point2Vector& measurements, double rank_tol = 1e-9); +/** + * Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors + * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman) + * @param projection_matrices Projection matrices (K*P^-1) + * @param measurements Unit3 bearing measurements + * @param rank_tol SVD rank tolerance + * @return Triangulated point, in homogeneous coordinates + */ +GTSAM_EXPORT Vector4 triangulateHomogeneousDLT( + const std::vector>& projection_matrices, + const std::vector& measurements, double rank_tol = 1e-9); + /** * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312 * @param projection_matrices Projection matrices (K*P^-1) @@ -71,6 +84,14 @@ GTSAM_EXPORT Point3 triangulateDLT( const Point2Vector& measurements, double rank_tol = 1e-9); +/** + * overload of previous function to work with Unit3 (projected to canonical camera) + */ +GTSAM_EXPORT Point3 triangulateDLT( + const std::vector>& projection_matrices, + const std::vector& measurements, + double rank_tol = 1e-9); + /** * Create a factor graph with projection factors from poses and one calibration * @param poses Camera poses @@ -180,26 +201,27 @@ Point3 triangulateNonlinear( return optimize(graph, values, Symbol('p', 0)); } -/** - * Create a 3*4 camera projection matrix from calibration and pose. 
- * Functor for partial application on calibration - * @param pose The camera pose - * @param cal The calibration - * @return Returns a Matrix34 - */ +template +std::vector> +projectionMatricesFromCameras(const CameraSet &cameras) { + std::vector> projection_matrices; + for (const CAMERA &camera: cameras) { + projection_matrices.push_back(camera.cameraProjectionMatrix()); + } + return projection_matrices; +} + +// overload, assuming pinholePose template -struct CameraProjectionMatrix { - CameraProjectionMatrix(const CALIBRATION& calibration) : - K_(calibration.K()) { +std::vector> projectionMatricesFromPoses( + const std::vector &poses, boost::shared_ptr sharedCal) { + std::vector> projection_matrices; + for (size_t i = 0; i < poses.size(); i++) { + PinholePose camera(poses.at(i), sharedCal); + projection_matrices.push_back(camera.cameraProjectionMatrix()); } - Matrix34 operator()(const Pose3& pose) const { - return K_ * (pose.inverse().matrix()).block<3, 4>(0, 0); - } -private: - const Matrix3 K_; -public: - GTSAM_MAKE_ALIGNED_OPERATOR_NEW -}; + return projection_matrices; +} /** * Function to triangulate 3D landmark point from an arbitrary number @@ -224,10 +246,7 @@ Point3 triangulatePoint3(const std::vector& poses, throw(TriangulationUnderconstrainedException()); // construct projection matrices from poses & calibration - std::vector> projection_matrices; - CameraProjectionMatrix createP(*sharedCal); // partially apply - for(const Pose3& pose: poses) - projection_matrices.push_back(createP(pose)); + auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal); // Triangulate linearly Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol); @@ -274,11 +293,7 @@ Point3 triangulatePoint3( throw(TriangulationUnderconstrainedException()); // construct projection matrices from poses & calibration - std::vector> projection_matrices; - for(const CAMERA& camera: cameras) - projection_matrices.push_back( - CameraProjectionMatrix(camera.calibration())( - camera.pose())); + auto projection_matrices = projectionMatricesFromCameras(cameras); Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol); // The n refine using non-linear optimization @@ -474,8 +489,8 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, #endif // Check reprojection error if (params.dynamicOutlierRejectionThreshold > 0) { - const Point2& zi = measured.at(i); - Point2 reprojectionError(camera.project(point) - zi); + const typename CAMERA::Measurement& zi = measured.at(i); + Point2 reprojectionError = camera.reprojectionError(point, zi); maxReprojError = std::max(maxReprojError, reprojectionError.norm()); } i += 1; @@ -503,6 +518,6 @@ using CameraSetCal3Bundler = CameraSet>; using CameraSetCal3_S2 = CameraSet>; using CameraSetCal3Fisheye = CameraSet>; using CameraSetCal3Unified = CameraSet>; - +using CameraSetSpherical = CameraSet; } // \namespace gtsam diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index 7199da0ad..cc003d8dc 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -19,6 +19,8 @@ #pragma once +#include + #include #include #include diff --git a/gtsam/inference/ClusterTree.h b/gtsam/inference/ClusterTree.h index e225bac5f..7dd414193 100644 --- a/gtsam/inference/ClusterTree.h +++ b/gtsam/inference/ClusterTree.h @@ -110,7 +110,7 @@ class ClusterTree { typedef sharedCluster sharedNode; /** concept check */ - GTSAM_CONCEPT_TESTABLE_TYPE(FactorType); + GTSAM_CONCEPT_TESTABLE_TYPE(FactorType) protected: FastVector 
roots_; diff --git a/gtsam/inference/EliminateableFactorGraph-inst.h b/gtsam/inference/EliminateableFactorGraph-inst.h index 81f4047a1..35e7505c9 100644 --- a/gtsam/inference/EliminateableFactorGraph-inst.h +++ b/gtsam/inference/EliminateableFactorGraph-inst.h @@ -36,17 +36,17 @@ namespace gtsam { // no Ordering is provided. When removing optional from VariableIndex, create VariableIndex // before creating ordering. VariableIndex computedVariableIndex(asDerived()); - return eliminateSequential(function, computedVariableIndex, orderingType); + return eliminateSequential(orderingType, function, computedVariableIndex); } else { // Compute an ordering and call this function again. We are guaranteed to have a // VariableIndex already here because we computed one if needed in the previous 'if' block. if (orderingType == Ordering::METIS) { Ordering computedOrdering = Ordering::Metis(asDerived()); - return eliminateSequential(computedOrdering, function, variableIndex, orderingType); + return eliminateSequential(computedOrdering, function, variableIndex); } else { Ordering computedOrdering = Ordering::Colamd(*variableIndex); - return eliminateSequential(computedOrdering, function, variableIndex, orderingType); + return eliminateSequential(computedOrdering, function, variableIndex); } } } @@ -78,29 +78,31 @@ namespace gtsam { } /* ************************************************************************* */ - template - boost::shared_ptr::BayesTreeType> - EliminateableFactorGraph::eliminateMultifrontal( - OptionalOrderingType orderingType, const Eliminate& function, - OptionalVariableIndex variableIndex) const - { - if(!variableIndex) { - // If no VariableIndex provided, compute one and call this function again IMPORTANT: we check - // for no variable index first so that it's always computed if we need to call COLAMD because - // no Ordering is provided. When removing optional from VariableIndex, create VariableIndex - // before creating ordering. + template + boost::shared_ptr< + typename EliminateableFactorGraph::BayesTreeType> + EliminateableFactorGraph::eliminateMultifrontal( + OptionalOrderingType orderingType, const Eliminate& function, + OptionalVariableIndex variableIndex) const { + if (!variableIndex) { + // If no VariableIndex provided, compute one and call this function again + // IMPORTANT: we check for no variable index first so that it's always + // computed if we need to call COLAMD because no Ordering is provided. + // When removing optional from VariableIndex, create VariableIndex before + // creating ordering. VariableIndex computedVariableIndex(asDerived()); - return eliminateMultifrontal(function, computedVariableIndex, orderingType); - } - else { - // Compute an ordering and call this function again. We are guaranteed to have a - // VariableIndex already here because we computed one if needed in the previous 'if' block. + return eliminateMultifrontal(orderingType, function, + computedVariableIndex); + } else { + // Compute an ordering and call this function again. We are guaranteed to + // have a VariableIndex already here because we computed one if needed in + // the previous 'if' block. 
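// Caller-side illustration only (a minimal sketch; it assumes a
// GaussianFactorGraph `gfg` has been built elsewhere): with the reordered
// overloads the ordering strategy now comes first, e.g.
//   auto bayesTree = gfg.eliminateMultifrontal(Ordering::COLAMD, EliminateQR);
// rather than passing the eliminate function first, as the now-deprecated
// overloads did.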
if (orderingType == Ordering::METIS) { Ordering computedOrdering = Ordering::Metis(asDerived()); - return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType); + return eliminateMultifrontal(computedOrdering, function, variableIndex); } else { Ordering computedOrdering = Ordering::Colamd(*variableIndex); - return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType); + return eliminateMultifrontal(computedOrdering, function, variableIndex); } } } @@ -273,7 +275,7 @@ namespace gtsam { else { // No ordering was provided for the unmarginalized variables, so order them with COLAMD. - return factorGraph->eliminateSequential(function); + return factorGraph->eliminateSequential(Ordering::COLAMD, function); } } } @@ -340,7 +342,7 @@ namespace gtsam { else { // No ordering was provided for the unmarginalized variables, so order them with COLAMD. - return factorGraph->eliminateMultifrontal(function); + return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function); } } } diff --git a/gtsam/inference/EliminateableFactorGraph.h b/gtsam/inference/EliminateableFactorGraph.h index edc4883e7..579eed7f9 100644 --- a/gtsam/inference/EliminateableFactorGraph.h +++ b/gtsam/inference/EliminateableFactorGraph.h @@ -288,8 +288,9 @@ namespace gtsam { FactorGraphType& asDerived() { return static_cast(*this); } public: + #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** \deprecated ordering and orderingType shouldn't both be specified */ - boost::shared_ptr eliminateSequential( + boost::shared_ptr GTSAM_DEPRECATED eliminateSequential( const Ordering& ordering, const Eliminate& function, OptionalVariableIndex variableIndex, @@ -298,7 +299,7 @@ namespace gtsam { } /** \deprecated orderingType specified first for consistency */ - boost::shared_ptr eliminateSequential( + boost::shared_ptr GTSAM_DEPRECATED eliminateSequential( const Eliminate& function, OptionalVariableIndex variableIndex = boost::none, OptionalOrderingType orderingType = boost::none) const { @@ -306,7 +307,7 @@ namespace gtsam { } /** \deprecated ordering and orderingType shouldn't both be specified */ - boost::shared_ptr eliminateMultifrontal( + boost::shared_ptr GTSAM_DEPRECATED eliminateMultifrontal( const Ordering& ordering, const Eliminate& function, OptionalVariableIndex variableIndex, @@ -315,7 +316,7 @@ namespace gtsam { } /** \deprecated orderingType specified first for consistency */ - boost::shared_ptr eliminateMultifrontal( + boost::shared_ptr GTSAM_DEPRECATED eliminateMultifrontal( const Eliminate& function, OptionalVariableIndex variableIndex = boost::none, OptionalOrderingType orderingType = boost::none) const { @@ -323,7 +324,7 @@ namespace gtsam { } /** \deprecated */ - boost::shared_ptr marginalMultifrontalBayesNet( + boost::shared_ptr GTSAM_DEPRECATED marginalMultifrontalBayesNet( boost::variant variables, boost::none_t, const Eliminate& function = EliminationTraitsType::DefaultEliminate, @@ -332,13 +333,14 @@ namespace gtsam { } /** \deprecated */ - boost::shared_ptr marginalMultifrontalBayesTree( + boost::shared_ptr GTSAM_DEPRECATED marginalMultifrontalBayesTree( boost::variant variables, boost::none_t, const Eliminate& function = EliminationTraitsType::DefaultEliminate, OptionalVariableIndex variableIndex = boost::none) const { return marginalMultifrontalBayesTree(variables, function, variableIndex); } + #endif }; } diff --git a/gtsam/inference/EliminationTree.h b/gtsam/inference/EliminationTree.h index e4a64c589..70e10b3bd 100644 --- a/gtsam/inference/EliminationTree.h +++ 
b/gtsam/inference/EliminationTree.h @@ -81,7 +81,7 @@ namespace gtsam { protected: /** concept check */ - GTSAM_CONCEPT_TESTABLE_TYPE(FactorType); + GTSAM_CONCEPT_TESTABLE_TYPE(FactorType) FastVector roots_; FastVector remainingFactors_; diff --git a/gtsam/inference/Factor.h b/gtsam/inference/Factor.h index 6ea81030a..c0ea4ea78 100644 --- a/gtsam/inference/Factor.h +++ b/gtsam/inference/Factor.h @@ -22,6 +22,7 @@ #pragma once #include +#include #include #include diff --git a/gtsam/linear/Errors.cpp b/gtsam/linear/Errors.cpp index 3fe2f3307..4b30dcc08 100644 --- a/gtsam/linear/Errors.cpp +++ b/gtsam/linear/Errors.cpp @@ -111,10 +111,10 @@ double dot(const Errors& a, const Errors& b) { /* ************************************************************************* */ template<> -void axpy(double alpha, const Errors& x, Errors& y) { +void axpy(double alpha, const Errors& x, Errors& y) { Errors::const_iterator it = x.begin(); for(Vector& yi: y) - axpy(alpha,*(it++),yi); + yi += alpha * (*(it++)); } /* ************************************************************************* */ diff --git a/gtsam/linear/Errors.h b/gtsam/linear/Errors.h index eb844e04d..e8ba7344e 100644 --- a/gtsam/linear/Errors.h +++ b/gtsam/linear/Errors.h @@ -66,7 +66,7 @@ namespace gtsam { * BLAS level 2 style */ template <> - GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y); + GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y); /** print with optional string */ GTSAM_EXPORT void print(const Errors& a, const std::string& s = "Error"); diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index 13eaee7e3..664aeff6d 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -19,7 +19,6 @@ */ #include -#include #include #include #include @@ -290,10 +289,11 @@ namespace gtsam { return blocks; } - /* ************************************************************************* */ + /* ************************************************************************ */ VectorValues GaussianFactorGraph::optimize(const Eliminate& function) const { gttic(GaussianFactorGraph_optimize); - return BaseEliminateable::eliminateMultifrontal(function)->optimize(); + return BaseEliminateable::eliminateMultifrontal(Ordering::COLAMD, function) + ->optimize(); } /* ************************************************************************* */ @@ -504,10 +504,32 @@ namespace gtsam { } /* ************************************************************************* */ - /** \deprecated */ - VectorValues GaussianFactorGraph::optimize(boost::none_t, - const Eliminate& function) const { - return optimize(function); + void GaussianFactorGraph::printErrors( + const VectorValues& values, const std::string& str, + const KeyFormatter& keyFormatter, + const std::function& + printCondition) const { + cout << str << "size: " << size() << endl << endl; + for (size_t i = 0; i < (*this).size(); i++) { + const sharedFactor& factor = (*this)[i]; + const double errorValue = + (factor != nullptr ? 
(*this)[i]->error(values) : .0); + if (!printCondition(factor.get(), errorValue, i)) + continue; // User-provided filter did not pass + + stringstream ss; + ss << "Factor " << i << ": "; + if (factor == nullptr) { + cout << "nullptr" + << "\n"; + } else { + factor->print(ss.str(), keyFormatter); + cout << "error = " << errorValue << "\n"; + } + cout << endl; // only one "endl" at end might be faster, \n for each + // factor + } } } // namespace gtsam diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index e3304d5e8..5ccb0ce91 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -21,12 +21,13 @@ #pragma once -#include #include +#include +#include // Included here instead of fw-declared so we can use Errors::iterator #include -#include #include -#include // Included here instead of fw-declared so we can use Errors::iterator +#include +#include namespace gtsam { @@ -375,6 +376,14 @@ namespace gtsam { /** In-place version e <- A*x that takes an iterator. */ void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const; + void printErrors( + const VectorValues& x, + const std::string& str = "GaussianFactorGraph: ", + const KeyFormatter& keyFormatter = DefaultKeyFormatter, + const std::function& + printCondition = + [](const Factor*, double, size_t) { return true; }) const; /// @} private: @@ -387,9 +396,14 @@ namespace gtsam { public: - /** \deprecated */ - VectorValues optimize(boost::none_t, - const Eliminate& function = EliminationTraitsType::DefaultEliminate) const; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 + /** \deprecated */ + VectorValues optimize(boost::none_t, + const Eliminate& function = + EliminationTraitsType::DefaultEliminate) const { + return optimize(function); + } +#endif }; diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 2fb54d329..54b9d955f 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -177,17 +177,16 @@ namespace gtsam { return *sqrt_information_; } - protected: - - /** protected constructor takes square root information matrix */ - Gaussian(size_t dim = 1, const boost::optional& sqrt_information = boost::none) : - Base(dim), sqrt_information_(sqrt_information) { - } public: typedef boost::shared_ptr shared_ptr; + /** constructor takes square root information matrix */ + Gaussian(size_t dim = 1, + const boost::optional& sqrt_information = boost::none) + : Base(dim), sqrt_information_(sqrt_information) {} + ~Gaussian() override {} /** @@ -290,13 +289,13 @@ namespace gtsam { Vector sigmas_, invsigmas_, precisions_; protected: - /** protected constructor - no initializations */ - Diagonal(); /** constructor to allow for disabling initialization of invsigmas */ Diagonal(const Vector& sigmas); public: + /** constructor - no initializations, for serialization */ + Diagonal(); typedef boost::shared_ptr shared_ptr; @@ -387,14 +386,6 @@ namespace gtsam { // Sigmas are contained in the base class Vector mu_; ///< Penalty function weight - needs to be large enough to dominate soft constraints - /** - * protected constructor takes sigmas. - * prevents any inf values - * from appearing in invsigmas or precisions. - * mu set to large default value (1000.0) - */ - Constrained(const Vector& sigmas = Z_1x1); - /** * Constructor that prevents any inf values * from appearing in invsigmas or precisions. @@ -406,6 +397,14 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; + /** + * protected constructor takes sigmas. 
+ * prevents any inf values + * from appearing in invsigmas or precisions. + * mu set to large default value (1000.0) + */ + Constrained(const Vector& sigmas = Z_1x1); + ~Constrained() override {} /// true if a constrained noise mode, saves slow/clumsy dynamic casting @@ -461,6 +460,11 @@ namespace gtsam { return MixedVariances(precisions.array().inverse()); } + /** + * The squaredMahalanobisDistance function for a constrained noisemodel, + * for non-constrained versions, uses sigmas, otherwise + * uses the penalty function with mu + */ double squaredMahalanobisDistance(const Vector& v) const override; /** Fully constrained variations */ @@ -531,11 +535,11 @@ namespace gtsam { Isotropic(size_t dim, double sigma) : Diagonal(Vector::Constant(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {} + public: + /* dummy constructor to allow for serialization */ Isotropic() : Diagonal(Vector1::Constant(1.0)),sigma_(1.0),invsigma_(1.0) {} - public: - ~Isotropic() override {} typedef boost::shared_ptr shared_ptr; @@ -592,14 +596,13 @@ namespace gtsam { * Unit: i.i.d. unit-variance noise on all m dimensions. */ class GTSAM_EXPORT Unit : public Isotropic { - protected: - - Unit(size_t dim=1): Isotropic(dim,1.0) {} - public: typedef boost::shared_ptr shared_ptr; + /** constructor for serialization */ + Unit(size_t dim=1): Isotropic(dim,1.0) {} + ~Unit() override {} /** @@ -682,19 +685,19 @@ namespace gtsam { /// Return the contained noise model const NoiseModel::shared_ptr& noise() const { return noise_; } - // TODO: functions below are dummy but necessary for the noiseModel::Base + // Functions below are dummy but necessary for the noiseModel::Base inline Vector whiten(const Vector& v) const override { Vector r = v; this->WhitenSystem(r); return r; } inline Matrix Whiten(const Matrix& A) const override { Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; } inline Vector unwhiten(const Vector& /*v*/) const override { throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); } - + /// Compute loss from the m-estimator using the Mahalanobis distance. double loss(const double squared_distance) const override { return robust_->loss(std::sqrt(squared_distance)); } - // TODO: these are really robust iterated re-weighting support functions + // These are really robust iterated re-weighting support functions virtual void WhitenSystem(Vector& b) const; void WhitenSystem(std::vector& A, Vector& b) const override; void WhitenSystem(Matrix& A, Vector& b) const override; @@ -705,7 +708,6 @@ namespace gtsam { return noise_->unweightedWhiten(v); } double weight(const Vector& v) const override { - // Todo(mikebosse): make the robust weight function input a vector. 
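// Note: the m-estimator weight is a function of the error vector's norm only,
// so the whole residual is collapsed to a single scalar reweighting factor
// before it is applied in the iteratively reweighted least-squares step.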
return robust_->weight(v.norm()); } diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp index fdcb4f7ac..215200818 100644 --- a/gtsam/linear/SubgraphPreconditioner.cpp +++ b/gtsam/linear/SubgraphPreconditioner.cpp @@ -173,7 +173,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd Errors::const_iterator it = e.begin(); for(auto& key_value: y) { const Vector& ei = *it; - axpy(alpha, ei, key_value.second); + key_value.second += alpha * ei; ++it; } transposeMultiplyAdd2(alpha, it, e.end(), y); @@ -191,7 +191,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha, VectorValues x = VectorValues::Zero(y); // x = 0 Ab2_->transposeMultiplyAdd(1.0,e2,x); // x += A2'*e2 - axpy(alpha, Rc1_->backSubstituteTranspose(x), y); // y += alpha*inv(R1')*x + y += alpha * Rc1_->backSubstituteTranspose(x); // y += alpha*inv(R1')*x } /* ************************************************************************* */ diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h index 58ef7d733..906ee80fd 100644 --- a/gtsam/linear/iterative-inl.h +++ b/gtsam/linear/iterative-inl.h @@ -72,7 +72,7 @@ namespace gtsam { double takeOptimalStep(V& x) { // TODO: can we use gamma instead of dot(d,g) ????? Answer not trivial double alpha = -dot(d, g) / dot(Ad, Ad); // calculate optimal step-size - axpy(alpha, d, x); // // do step in new search direction, x += alpha*d + x += alpha * d; // do step in new search direction, x += alpha*d return alpha; } @@ -106,7 +106,7 @@ namespace gtsam { double beta = new_gamma / gamma; // d = g + d*beta; d *= beta; - axpy(1.0, g, d); + d += 1.0 * g; } gamma = new_gamma; diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 8635c55f8..d882cb38b 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -34,6 +34,9 @@ virtual class Gaussian : gtsam::noiseModel::Base { // enabling serialization functionality void serializable() const; + + // enable pickling in python + void pickle() const; }; virtual class Diagonal : gtsam::noiseModel::Gaussian { @@ -49,6 +52,9 @@ virtual class Diagonal : gtsam::noiseModel::Gaussian { // enabling serialization functionality void serializable() const; + + // enable pickling in python + void pickle() const; }; virtual class Constrained : gtsam::noiseModel::Diagonal { @@ -66,6 +72,9 @@ virtual class Constrained : gtsam::noiseModel::Diagonal { // enabling serialization functionality void serializable() const; + + // enable pickling in python + void pickle() const; }; virtual class Isotropic : gtsam::noiseModel::Diagonal { @@ -78,6 +87,9 @@ virtual class Isotropic : gtsam::noiseModel::Diagonal { // enabling serialization functionality void serializable() const; + + // enable pickling in python + void pickle() const; }; virtual class Unit : gtsam::noiseModel::Isotropic { @@ -85,6 +97,9 @@ virtual class Unit : gtsam::noiseModel::Isotropic { // enabling serialization functionality void serializable() const; + + // enable pickling in python + void pickle() const; }; namespace mEstimator { @@ -221,6 +236,7 @@ class VectorValues { //Constructors VectorValues(); VectorValues(const gtsam::VectorValues& other); + VectorValues(const gtsam::VectorValues& first, const gtsam::VectorValues& second); //Named Constructors static gtsam::VectorValues Zero(const gtsam::VectorValues& model); @@ -301,6 +317,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor { void print(string s = "", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; void printKeys(string s) const; + 
gtsam::KeyVector& keys() const; bool equals(const gtsam::GaussianFactor& lf, double tol) const; size_t size() const; Vector unweighted_error(const gtsam::VectorValues& c) const; @@ -400,6 +417,7 @@ class GaussianFactorGraph { // error and probability double error(const gtsam::VectorValues& c) const; double probPrime(const gtsam::VectorValues& c) const; + void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; gtsam::GaussianFactorGraph clone() const; gtsam::GaussianFactorGraph negate() const; @@ -512,9 +530,9 @@ virtual class GaussianBayesNet { size_t size() const; // FactorGraph derived interface - // size_t size() const; gtsam::GaussianConditional* at(size_t idx) const; gtsam::KeySet keys() const; + gtsam::KeyVector keyVector() const; bool exists(size_t idx) const; void saveGraph(const string& s) const; diff --git a/gtsam/linear/tests/testErrors.cpp b/gtsam/linear/tests/testErrors.cpp index 74eef9a2c..f11fb90b9 100644 --- a/gtsam/linear/tests/testErrors.cpp +++ b/gtsam/linear/tests/testErrors.cpp @@ -32,7 +32,7 @@ TEST( Errors, arithmetic ) e += Vector2(1.0,2.0), Vector3(3.0,4.0,5.0); DOUBLES_EQUAL(1+4+9+16+25,dot(e,e),1e-9); - axpy(2.0,e,e); + axpy(2.0, e, e); Errors expected; expected += Vector2(3.0,6.0), Vector3(9.0,12.0,15.0); CHECK(assert_equal(expected,e)); diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 42d68a603..b974b6cd5 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -662,25 +662,14 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone) { double dead_zone_size = 1.0; SharedNoiseModel robust = noiseModel::Robust::Create( - noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size), - Unit::Create(3)); - -/* - * TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes - * implement a loss function, and GTSAM calls the weight function to evaluate the - * total penalty, rather than calling the loss function. The weight function should be - * used during iteratively reweighted least squares optimization, but should not be used to - * evaluate the total penalty. The long-term solution is for all mEstimators to implement - * both a weight and a loss function, and for GTSAM to call the loss function when - * evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it - * commented out until the underlying bug in GTSAM is fixed. 
- * - * for (int i = 0; i < 5; i++) { - * Vector3 error = Vector3(i, 0, 0); - * DOUBLES_EQUAL(0.5*max(0,i-1)*max(0,i-1), robust->distance(error), 1e-8); - * } - */ + noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size), + Unit::Create(3)); + for (int i = 0; i < 5; i++) { + Vector3 error = Vector3(i, 0, 0); + DOUBLES_EQUAL(std::fmax(0, i - dead_zone_size) * i, + robust->squaredMahalanobisDistance(error), 1e-8); + } } TEST(NoiseModel, lossFunctionAtZero) @@ -707,9 +696,9 @@ TEST(NoiseModel, lossFunctionAtZero) auto dcs = mEstimator::DCS::Create(k); DOUBLES_EQUAL(dcs->loss(0), 0, 1e-8); DOUBLES_EQUAL(dcs->weight(0), 1, 1e-8); - // auto lsdz = mEstimator::L2WithDeadZone::Create(k); - // DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8); - // DOUBLES_EQUAL(lsdz->weight(0), 1, 1e-8); + auto lsdz = mEstimator::L2WithDeadZone::Create(k); + DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8); + DOUBLES_EQUAL(lsdz->weight(0), 0, 1e-8); } diff --git a/gtsam/linear/tests/testSerializationLinear.cpp b/gtsam/linear/tests/testSerializationLinear.cpp index c5b3dab37..881b2830e 100644 --- a/gtsam/linear/tests/testSerializationLinear.cpp +++ b/gtsam/linear/tests/testSerializationLinear.cpp @@ -39,14 +39,14 @@ using namespace gtsam::serializationTestHelpers; /* ************************************************************************* */ // Export Noisemodels // See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic") -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") /* ************************************************************************* */ // example noise models @@ -129,9 +129,9 @@ TEST (Serialization, SharedDiagonal_noiseModels) { /* Create GUIDs for factors */ /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional"); +BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional") /* ************************************************************************* */ TEST (Serialization, linear_factors) { diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index ca1c5b93a..7d086eb57 100644 
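// Likely rationale for dropping the trailing semicolons on the
// BOOST_CLASS_EXPORT_* lines above and below (an assumption, not stated in
// the patch): the macros already expand to complete declarations, so the
// extra ';' only adds an empty declaration that pedantic warning levels
// flag, e.g.
//   BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")  // no trailing ';'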
--- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -257,5 +257,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) { /// namespace gtsam /// Boost serialization export definition for derived class -BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams); +BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams) diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index ed94750b7..89e8b574f 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -353,4 +353,4 @@ struct traits : public Testable {}; } // namespace gtsam /// Add Boost serialization export key (declaration) for derived class -BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams); +BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams) diff --git a/gtsam/navigation/navigation.i b/gtsam/navigation/navigation.i index 48a5a35de..1f9ffcf2e 100644 --- a/gtsam/navigation/navigation.i +++ b/gtsam/navigation/navigation.i @@ -41,6 +41,12 @@ class ConstantBias { Vector gyroscope() const; Vector correctAccelerometer(Vector measurement) const; Vector correctGyroscope(Vector measurement) const; + + // enabling serialization functionality + void serialize() const; + + // enable pickling in python + void pickle() const; }; }///\namespace imuBias @@ -64,6 +70,12 @@ class NavState { gtsam::NavState retract(const Vector& x) const; Vector localCoordinates(const gtsam::NavState& g) const; + + // enabling serialization functionality + void serialize() const; + + // enable pickling in python + void pickle() const; }; #include @@ -88,6 +100,8 @@ virtual class PreintegratedRotationParams { virtual class PreintegrationParams : gtsam::PreintegratedRotationParams { PreintegrationParams(Vector n_gravity); + gtsam::Vector n_gravity; + static gtsam::PreintegrationParams* MakeSharedD(double g); static gtsam::PreintegrationParams* MakeSharedU(double g); static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81 @@ -104,6 +118,12 @@ virtual class PreintegrationParams : gtsam::PreintegratedRotationParams { Matrix getAccelerometerCovariance() const; Matrix getIntegrationCovariance() const; bool getUse2ndOrderCoriolis() const; + + // enabling serialization functionality + void serialize() const; + + // enable pickling in python + void pickle() const; }; #include @@ -133,6 +153,12 @@ class PreintegratedImuMeasurements { Vector biasHatVector() const; gtsam::NavState predict(const gtsam::NavState& state_i, const gtsam::imuBias::ConstantBias& bias) const; + + // enabling serialization functionality + void serialize() const; + + // enable pickling in python + void pickle() const; }; virtual class ImuFactor: gtsam::NonlinearFactor { diff --git a/gtsam/navigation/tests/testAttitudeFactor.cpp b/gtsam/navigation/tests/testAttitudeFactor.cpp index d49907cbf..9304e8412 100644 --- a/gtsam/navigation/tests/testAttitudeFactor.cpp +++ b/gtsam/navigation/tests/testAttitudeFactor.cpp @@ -66,7 +66,7 @@ TEST( Rot3AttitudeFactor, Constructor ) { /* ************************************************************************* */ // Export Noisemodels // See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html -BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic); +BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic) /* ************************************************************************* */ TEST(Rot3AttitudeFactor, Serialization) { diff --git a/gtsam/navigation/tests/testGPSFactor.cpp 
b/gtsam/navigation/tests/testGPSFactor.cpp index b784c0c94..c94e1d3d5 100644 --- a/gtsam/navigation/tests/testGPSFactor.cpp +++ b/gtsam/navigation/tests/testGPSFactor.cpp @@ -72,7 +72,7 @@ TEST( GPSFactor, Constructor ) { // Calculate numerical derivatives Matrix expectedH = numericalDerivative11( - std::bind(&GPSFactor::evaluateError, &factor, _1, boost::none), T); + std::bind(&GPSFactor::evaluateError, &factor, std::placeholders::_1, boost::none), T); // Use the factor to calculate the derivative Matrix actualH; @@ -101,7 +101,7 @@ TEST( GPSFactor2, Constructor ) { // Calculate numerical derivatives Matrix expectedH = numericalDerivative11( - std::bind(&GPSFactor2::evaluateError, &factor, _1, boost::none), T); + std::bind(&GPSFactor2::evaluateError, &factor, std::placeholders::_1, boost::none), T); // Use the factor to calculate the derivative Matrix actualH; diff --git a/gtsam/navigation/tests/testImuFactorSerialization.cpp b/gtsam/navigation/tests/testImuFactorSerialization.cpp index ed72e18e9..e72b1fb5b 100644 --- a/gtsam/navigation/tests/testImuFactorSerialization.cpp +++ b/gtsam/navigation/tests/testImuFactorSerialization.cpp @@ -31,16 +31,16 @@ using namespace gtsam; using namespace gtsam::serializationTestHelpers; BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, - "gtsam_noiseModel_Constrained"); + "gtsam_noiseModel_Constrained") BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, - "gtsam_noiseModel_Diagonal"); + "gtsam_noiseModel_Diagonal") BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, - "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); + "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, - "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); + "gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") template P getPreintegratedMeasurements() { diff --git a/gtsam/navigation/tests/testMagFactor.cpp b/gtsam/navigation/tests/testMagFactor.cpp index 5107b3b6b..85447facd 100644 --- a/gtsam/navigation/tests/testMagFactor.cpp +++ b/gtsam/navigation/tests/testMagFactor.cpp @@ -64,7 +64,7 @@ TEST( MagFactor, unrotate ) { Point3 expected(22735.5, 314.502, 44202.5); EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1)); EXPECT( assert_equal(numericalDerivative11 // - (std::bind(&MagFactor::unrotate, _1, nM, none), theta), H, 1e-6)); + (std::bind(&MagFactor::unrotate, std::placeholders::_1, nM, none), theta), H, 1e-6)); } // ************************************************************************* @@ -76,35 +76,35 @@ TEST( MagFactor, Factors ) { MagFactor f(1, measured, s, dir, bias, model); EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5)); EXPECT( assert_equal((Matrix)numericalDerivative11 // - (std::bind(&MagFactor::evaluateError, &f, _1, none), theta), H1, 1e-7)); + (std::bind(&MagFactor::evaluateError, &f, std::placeholders::_1, none), theta), H1, 1e-7)); // MagFactor1 MagFactor1 f1(1, measured, s, dir, bias, model); EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5)); EXPECT( assert_equal(numericalDerivative11 // - (std::bind(&MagFactor1::evaluateError, &f1, _1, none), nRb), H1, 1e-7)); + (std::bind(&MagFactor1::evaluateError, &f1, 
std::placeholders::_1, none), nRb), H1, 1e-7)); // MagFactor2 MagFactor2 f2(1, 2, measured, nRb, model); EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5)); EXPECT( assert_equal(numericalDerivative11 // - (std::bind(&MagFactor2::evaluateError, &f2, _1, bias, none, none), scaled),// + (std::bind(&MagFactor2::evaluateError, &f2, std::placeholders::_1, bias, none, none), scaled),// H1, 1e-7)); EXPECT( assert_equal(numericalDerivative11 // - (std::bind(&MagFactor2::evaluateError, &f2, scaled, _1, none, none), bias),// + (std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),// H2, 1e-7)); // MagFactor2 MagFactor3 f3(1, 2, 3, measured, nRb, model); EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5)); EXPECT(assert_equal((Matrix)numericalDerivative11 // - (std::bind(&MagFactor3::evaluateError, &f3, _1, dir, bias, none, none, none), s),// + (std::bind(&MagFactor3::evaluateError, &f3, std::placeholders::_1, dir, bias, none, none, none), s),// H1, 1e-7)); EXPECT(assert_equal(numericalDerivative11 // - (std::bind(&MagFactor3::evaluateError, &f3, s, _1, bias, none, none, none), dir),// + (std::bind(&MagFactor3::evaluateError, &f3, s, std::placeholders::_1, bias, none, none, none), dir),// H2, 1e-7)); EXPECT(assert_equal(numericalDerivative11 // - (std::bind(&MagFactor3::evaluateError, &f3, s, dir, _1, none, none, none), bias),// + (std::bind(&MagFactor3::evaluateError, &f3, s, dir, std::placeholders::_1, none, none, none), bias),// H3, 1e-7)); } diff --git a/gtsam/nonlinear/CustomFactor.h b/gtsam/nonlinear/CustomFactor.h index dbaf31898..615b5418e 100644 --- a/gtsam/nonlinear/CustomFactor.h +++ b/gtsam/nonlinear/CustomFactor.h @@ -101,4 +101,4 @@ private: } }; -}; +} diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.cpp b/gtsam/nonlinear/DoglegOptimizerImpl.cpp index c319f26e6..7e9db6b64 100644 --- a/gtsam/nonlinear/DoglegOptimizerImpl.cpp +++ b/gtsam/nonlinear/DoglegOptimizerImpl.cpp @@ -78,7 +78,8 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double delta, const VectorValues& // Compute blended point if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl; - VectorValues blend = (1. - tau) * x_u; axpy(tau, x_n, blend); + VectorValues blend = (1. - tau) * x_u; + blend += tau * x_n; return blend; } diff --git a/gtsam/nonlinear/Expression-inl.h b/gtsam/nonlinear/Expression-inl.h index cf2462dfc..b2ef6f055 100644 --- a/gtsam/nonlinear/Expression-inl.h +++ b/gtsam/nonlinear/Expression-inl.h @@ -246,6 +246,18 @@ struct apply_compose { return x.compose(y, H1, H2); } }; + +template <> +struct apply_compose { + double operator()(const double& x, const double& y, + OptionalJacobian<1, 1> H1 = boost::none, + OptionalJacobian<1, 1> H2 = boost::none) const { + if (H1) H1->setConstant(y); + if (H2) H2->setConstant(x); + return x * y; + } +}; + } // Global methods: diff --git a/gtsam/nonlinear/ExpressionFactor.h b/gtsam/nonlinear/ExpressionFactor.h index b55d643aa..d22690cf3 100644 --- a/gtsam/nonlinear/ExpressionFactor.h +++ b/gtsam/nonlinear/ExpressionFactor.h @@ -305,7 +305,7 @@ struct traits> * \deprecated Prefer the more general ExpressionFactorN<>. 
*/ template -class ExpressionFactor2 : public ExpressionFactorN { +class GTSAM_DEPRECATED ExpressionFactor2 : public ExpressionFactorN { public: /// Destructor ~ExpressionFactor2() override {} diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h index 3ddaf4820..3025d2468 100644 --- a/gtsam/nonlinear/GncOptimizer.h +++ b/gtsam/nonlinear/GncOptimizer.h @@ -142,8 +142,9 @@ class GTSAM_EXPORT GncOptimizer { * provides an extra interface for the user to initialize the weightst * */ void setWeights(const Vector w) { - if(w.size() != nfg_.size()){ - throw std::runtime_error("GncOptimizer::setWeights: the number of specified weights" + if (size_t(w.size()) != nfg_.size()) { + throw std::runtime_error( + "GncOptimizer::setWeights: the number of specified weights" " does not match the size of the factor graph."); } weights_ = w; diff --git a/gtsam/nonlinear/LinearContainerFactor.h b/gtsam/nonlinear/LinearContainerFactor.h index 8c5b34f01..efc095775 100644 --- a/gtsam/nonlinear/LinearContainerFactor.h +++ b/gtsam/nonlinear/LinearContainerFactor.h @@ -29,9 +29,6 @@ protected: GaussianFactor::shared_ptr factor_; boost::optional linearizationPoint_; - /** Default constructor - necessary for serialization */ - LinearContainerFactor() {} - /** direct copy constructor */ GTSAM_EXPORT LinearContainerFactor(const GaussianFactor::shared_ptr& factor, const boost::optional& linearizationPoint); @@ -43,6 +40,9 @@ public: typedef boost::shared_ptr shared_ptr; + /** Default constructor - necessary for serialization */ + LinearContainerFactor() {} + /** Primary constructor: store a linear factor with optional linearization point */ GTSAM_EXPORT LinearContainerFactor(const JacobianFactor& factor, const Values& linearizationPoint = Values()); diff --git a/gtsam/nonlinear/Marginals.cpp b/gtsam/nonlinear/Marginals.cpp index c29a79623..41212ed76 100644 --- a/gtsam/nonlinear/Marginals.cpp +++ b/gtsam/nonlinear/Marginals.cpp @@ -80,11 +80,15 @@ Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solut /* ************************************************************************* */ void Marginals::computeBayesTree() { + // The default ordering to use. 
+ const Ordering::OrderingType defaultOrderingType = Ordering::COLAMD; // Compute BayesTree - if(factorization_ == CHOLESKY) - bayesTree_ = *graph_.eliminateMultifrontal(EliminatePreferCholesky); - else if(factorization_ == QR) - bayesTree_ = *graph_.eliminateMultifrontal(EliminateQR); + if (factorization_ == CHOLESKY) + bayesTree_ = *graph_.eliminateMultifrontal(defaultOrderingType, + EliminatePreferCholesky); + else if (factorization_ == QR) + bayesTree_ = + *graph_.eliminateMultifrontal(defaultOrderingType, EliminateQR); } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index 9935bafdd..947274a97 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -131,17 +131,19 @@ protected: void computeBayesTree(const Ordering& ordering); public: +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** \deprecated argument order changed due to removing boost::optional */ - Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization, + GTSAM_DEPRECATED Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization, const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {} /** \deprecated argument order changed due to removing boost::optional */ - Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization, + GTSAM_DEPRECATED Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization, const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {} /** \deprecated argument order changed due to removing boost::optional */ - Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization, + GTSAM_DEPRECATED Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization, const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {} +#endif }; diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index fd9e49a62..a7a0d724b 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -149,7 +149,7 @@ boost::tuple nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams ¶ms, const bool singleIteration, const bool gradientDescent = false) { - // GTSAM_CONCEPT_MANIFOLD_TYPE(V); + // GTSAM_CONCEPT_MANIFOLD_TYPE(V) size_t iteration = 0; diff --git a/gtsam/nonlinear/NonlinearEquality.h b/gtsam/nonlinear/NonlinearEquality.h index 47083d5d7..43d30254e 100644 --- a/gtsam/nonlinear/NonlinearEquality.h +++ b/gtsam/nonlinear/NonlinearEquality.h @@ -219,7 +219,6 @@ protected: X value_; /// fixed value for variable GTSAM_CONCEPT_MANIFOLD_TYPE(X) - GTSAM_CONCEPT_TESTABLE_TYPE(X) public: diff --git a/gtsam/nonlinear/NonlinearFactor.cpp b/gtsam/nonlinear/NonlinearFactor.cpp index 8b8d2da6c..3d572e970 100644 --- a/gtsam/nonlinear/NonlinearFactor.cpp +++ b/gtsam/nonlinear/NonlinearFactor.cpp @@ -114,7 +114,7 @@ double NoiseModelFactor::weight(const Values& c) const { if (noiseModel_) { const Vector b = unwhitenedError(c); check(noiseModel_, b.size()); - return 0.5 * noiseModel_->weight(b); + return noiseModel_->weight(b); } else return 1.0; diff --git a/gtsam/nonlinear/NonlinearFactorGraph.h b/gtsam/nonlinear/NonlinearFactorGraph.h index 4d321f8ab..61cbbafb9 
100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.h +++ b/gtsam/nonlinear/NonlinearFactorGraph.h @@ -265,15 +265,17 @@ namespace gtsam { public: +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** \deprecated */ - boost::shared_ptr linearizeToHessianFactor( + boost::shared_ptr GTSAM_DEPRECATED linearizeToHessianFactor( const Values& values, boost::none_t, const Dampen& dampen = nullptr) const {return linearizeToHessianFactor(values, dampen);} /** \deprecated */ - Values updateCholesky(const Values& values, boost::none_t, + Values GTSAM_DEPRECATED updateCholesky(const Values& values, boost::none_t, const Dampen& dampen = nullptr) const {return updateCholesky(values, dampen);} +#endif }; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 0d7e9e17f..3ce6db4af 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -147,11 +147,13 @@ VectorValues NonlinearOptimizer::solve(const GaussianFactorGraph& gfg, } else if (params.isSequential()) { // Sequential QR or Cholesky (decided by params.getEliminationFunction()) if (params.ordering) - delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction(), - boost::none, params.orderingType)->optimize(); + delta = gfg.eliminateSequential(*params.ordering, + params.getEliminationFunction()) + ->optimize(); else - delta = gfg.eliminateSequential(params.getEliminationFunction(), boost::none, - params.orderingType)->optimize(); + delta = gfg.eliminateSequential(params.orderingType, + params.getEliminationFunction()) + ->optimize(); } else if (params.isIterative()) { // Conjugate Gradient -> needs params.iterativeParams if (!params.iterativeParams) diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 33e9e7d82..207f35540 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -24,6 +24,7 @@ #pragma once +#include #include #include #include @@ -62,17 +63,18 @@ namespace gtsam { class GTSAM_EXPORT Values { private: - // Internally we store a boost ptr_map, with a ValueCloneAllocator (defined - // below) to clone and deallocate the Value objects, and a boost - // fast_pool_allocator to allocate map nodes. In this way, all memory is - // allocated in a boost memory pool. + // below) to clone and deallocate the Value objects, and our compile-flag- + // dependent FastDefaultAllocator to allocate map nodes. In this way, the + // user defines the allocation details (i.e. optimize for memory pool/arenas + // concurrency). + typedef internal::FastDefaultAllocator>::type KeyValuePtrPairAllocator; typedef boost::ptr_map< Key, Value, std::less, ValueCloneAllocator, - boost::fast_pool_allocator > > KeyValueMap; + KeyValuePtrPairAllocator > KeyValueMap; // The member to store the values, see just above KeyValueMap values_; diff --git a/gtsam/nonlinear/factorTesting.h b/gtsam/nonlinear/factorTesting.h index 74ef87737..3a9b6fb11 100644 --- a/gtsam/nonlinear/factorTesting.h +++ b/gtsam/nonlinear/factorTesting.h @@ -35,8 +35,7 @@ namespace gtsam { * zero errors anyway. However, it means that below will only be exact for the correct measurement. 
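As a sketch of the central-difference rule that linearizeNumerically applies per key, shown on plain vectors for clarity (the real implementation additionally retracts on the manifold and uses the whitened error):

#include <functional>
#include <gtsam/base/Matrix.h>

gtsam::Matrix numericalJacobianSketch(
    const std::function<gtsam::Vector(const gtsam::Vector&)>& f,
    const gtsam::Vector& x, double delta = 1e-5) {
  const gtsam::Vector fx = f(x);
  gtsam::Matrix J(fx.size(), x.size());
  const double one_over_2delta = 1.0 / (2.0 * delta);
  for (int col = 0; col < x.size(); ++col) {
    gtsam::Vector dx = gtsam::Vector::Zero(x.size());
    dx(col) = delta;
    // Each column is (f(x + delta*e_col) - f(x - delta*e_col)) / (2*delta).
    J.col(col) = (f(x + dx) - f(x - dx)) * one_over_2delta;
  }
  return J;
}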
*/ JacobianFactor linearizeNumerically(const NoiseModelFactor& factor, - const Values& values, double delta = 1e-5) { - + const Values& values, double delta = 1e-5) { // We will fill a vector of key/Jacobians pairs (a map would sort) std::vector > jacobians; @@ -46,24 +45,24 @@ JacobianFactor linearizeNumerically(const NoiseModelFactor& factor, // Loop over all variables const double one_over_2delta = 1.0 / (2.0 * delta); - for(Key key: factor) { + for (Key key : factor) { // Compute central differences using the values struct. VectorValues dX = values.zeroVectors(); const size_t cols = dX.dim(key); Matrix J = Matrix::Zero(rows, cols); for (size_t col = 0; col < cols; ++col) { Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols); - dx[col] = delta; + dx(col) = delta; dX[key] = dx; Values eval_values = values.retract(dX); const Eigen::VectorXd left = factor.whitenedError(eval_values); - dx[col] = -delta; + dx(col) = -delta; dX[key] = dx; eval_values = values.retract(dX); const Eigen::VectorXd right = factor.whitenedError(eval_values); J.col(col) = (left - right) * one_over_2delta; } - jacobians.push_back(std::make_pair(key,J)); + jacobians.push_back(std::make_pair(key, J)); } // Next step...return JacobianFactor diff --git a/gtsam/nonlinear/nonlinear.i b/gtsam/nonlinear/nonlinear.i index d068bd7ee..152c4b8e7 100644 --- a/gtsam/nonlinear/nonlinear.i +++ b/gtsam/nonlinear/nonlinear.i @@ -115,6 +115,10 @@ class Ordering { Ordering(); Ordering(const gtsam::Ordering& other); + template + static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph); + // Testable void print(string s = "", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; @@ -734,7 +738,12 @@ class ISAM2 { const gtsam::KeyList& extraReelimKeys, bool force_relinearize); + gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, + const gtsam::Values& newTheta, + const gtsam::ISAM2UpdateParams& updateParams); + gtsam::Values getLinearizationPoint() const; + bool valueExists(gtsam::Key key) const; gtsam::Values calculateEstimate() const; template , gtsam::PinholeCamera, Vector, Matrix}> VALUE calculateEstimate(size_t key) const; - gtsam::Values calculateBestEstimate() const; Matrix marginalCovariance(size_t key) const; + gtsam::Values calculateBestEstimate() const; gtsam::VectorValues getDelta() const; + double error(const gtsam::VectorValues& x) const; gtsam::NonlinearFactorGraph getFactorsUnsafe() const; gtsam::VariableIndex getVariableIndex() const; + const gtsam::KeySet& getFixedVariables() const; gtsam::ISAM2Params params() const; + + void printStats() const; + gtsam::VectorValues gradientAtZero() const; }; #include diff --git a/gtsam/nonlinear/tests/testExpression.cpp b/gtsam/nonlinear/tests/testExpression.cpp index 80262ae3f..92f4998a2 100644 --- a/gtsam/nonlinear/tests/testExpression.cpp +++ b/gtsam/nonlinear/tests/testExpression.cpp @@ -293,6 +293,19 @@ TEST(Expression, compose3) { EXPECT(expected == R3.keys()); } +/* ************************************************************************* */ +// Test compose with double type (should be multiplication). +TEST(Expression, compose4) { + // Create expression + gtsam::Key key = 1; + Double_ R1(key), R2(key); + Double_ R3 = R1 * R2; + + // Check keys + set expected = list_of(1); + EXPECT(expected == R3.keys()); +} + /* ************************************************************************* */ // Test with ternary function. 
Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1, diff --git a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp index f4bb5f4f6..4a73cbb0b 100644 --- a/gtsam/nonlinear/tests/testSerializationNonlinear.cpp +++ b/gtsam/nonlinear/tests/testSerializationNonlinear.cpp @@ -35,37 +35,37 @@ using namespace gtsam::serializationTestHelpers; /* ************************************************************************* */ // Create GUIDs for Noisemodels -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") /* ************************************************************************* */ // Create GUIDs for factors -BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor, "gtsam::PriorFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional"); +BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor, "gtsam::PriorFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional") /* ************************************************************************* */ // Export all classes derived from Value -GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); 
-GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler); -GTSAM_VALUE_EXPORT(gtsam::Point3); -GTSAM_VALUE_EXPORT(gtsam::Pose3); -GTSAM_VALUE_EXPORT(gtsam::Rot3); -GTSAM_VALUE_EXPORT(gtsam::PinholeCamera); -GTSAM_VALUE_EXPORT(gtsam::PinholeCamera); -GTSAM_VALUE_EXPORT(gtsam::PinholeCamera); +GTSAM_VALUE_EXPORT(gtsam::Cal3_S2) +GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler) +GTSAM_VALUE_EXPORT(gtsam::Point3) +GTSAM_VALUE_EXPORT(gtsam::Pose3) +GTSAM_VALUE_EXPORT(gtsam::Rot3) +GTSAM_VALUE_EXPORT(gtsam::PinholeCamera) +GTSAM_VALUE_EXPORT(gtsam::PinholeCamera) +GTSAM_VALUE_EXPORT(gtsam::PinholeCamera) namespace detail { template struct pack { diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index aef41d5fd..8a1ffdd72 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -103,7 +103,7 @@ namespace gtsam { boost::none, boost::optional H2 = boost::none) const override { T hx = traits::Between(p1, p2, H1, H2); // h(x) // manifold equivalent of h(x)-z -> log(z,h(x)) -#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR +#ifdef GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR typename traits::ChartJacobian::Jacobian Hlocal; Vector rval = traits::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0); if (H1) *H1 = Hlocal * (*H1); diff --git a/gtsam/slam/GeneralSFMFactor.h b/gtsam/slam/GeneralSFMFactor.h index 2e4543177..bfc3a0f78 100644 --- a/gtsam/slam/GeneralSFMFactor.h +++ b/gtsam/slam/GeneralSFMFactor.h @@ -59,8 +59,8 @@ namespace gtsam { template class GeneralSFMFactor: public NoiseModelFactor2 { - GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA); - GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK); + GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA) + GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK) static const int DimC = FixedDimension::value; static const int DimL = FixedDimension::value; @@ -202,7 +202,7 @@ struct traits > : Testable< template class GeneralSFMFactor2: public NoiseModelFactor3 { - GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION); + GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION) static const int DimK = FixedDimension::value; protected: diff --git a/gtsam/slam/JacobianFactorSVD.h b/gtsam/slam/JacobianFactorSVD.h index bc906d24e..f6bc1dd8c 100644 --- a/gtsam/slam/JacobianFactorSVD.h +++ b/gtsam/slam/JacobianFactorSVD.h @@ -9,20 +9,21 @@ namespace gtsam { /** - * JacobianFactor for Schur complement that uses the "Nullspace Trick" by Mourikis + * JacobianFactor for Schur complement that uses the "Nullspace Trick" by + * Mourikis et al. * * This trick is equivalent to the Schur complement, but can be faster. - * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are poses, - * is multiplied by Enull, a matrix that spans the left nullspace of E, i.e., - * The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm * mx3 * 3x3) - * where Enull is an m x (m-3) matrix - * Then Enull'*E*dp = 0, and + * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are + * poses, is multiplied by Enull, a matrix that spans the left nullspace of E, + * i.e., The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm * + * mx3 * 3x3) where Enull is an m x (m-3) matrix Then Enull'*E*dp = 0, and * |Enull'*E*dp + Enull'*F*dX - Enull'*b| == |Enull'*F*dX - Enull'*b| * Normally F is m x 6*numKeys, and Enull'*F yields an (m-3) x 6*numKeys matrix. * - * The code below assumes that F is block diagonal and is given as a vector of ZDim*D blocks. 
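A small illustration of the step described here, as a sketch using Eigen directly: take the SVD of the point Jacobian E and keep the left singular vectors beyond the first three as Enull, so that Enull'*E vanishes.

#include <cassert>
#include <Eigen/SVD>
#include <gtsam/base/Matrix.h>

void nullspaceTrickSketch() {
  using gtsam::Matrix;
  const int m = 8;                        // e.g. 4 measurements with ZDim = 2
  const Matrix E = Matrix::Random(m, 3);  // stand-in for the point Jacobian
  Eigen::JacobiSVD<Matrix> svd(E, Eigen::ComputeFullU);
  const Matrix Enull = svd.matrixU().rightCols(m - 3);  // m x (m-3)
  // Multiplying the system by Enull' eliminates the point: Enull'*E*dp == 0.
  assert((Enull.transpose() * E).norm() < 1e-9);
}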
- * Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for D=6) - * Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as a 1x2 * 2x6 mult + * The code below assumes that F is block diagonal and is given as a vector of + * ZDim*D blocks. Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for + * D=6) Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as + * a 1x2 * 2x6 multiplication. */ template class JacobianFactorSVD: public RegularJacobianFactor { @@ -37,10 +38,10 @@ public: JacobianFactorSVD() { } - /// Empty constructor with keys - JacobianFactorSVD(const KeyVector& keys, // - const SharedDiagonal& model = SharedDiagonal()) : - Base() { + /// Empty constructor with keys. + JacobianFactorSVD(const KeyVector& keys, + const SharedDiagonal& model = SharedDiagonal()) + : Base() { Matrix zeroMatrix = Matrix::Zero(0, D); Vector zeroVector = Vector::Zero(0); std::vector QF; @@ -51,18 +52,21 @@ public: } /** - * @brief Constructor - * Takes the CameraSet derivatives (as ZDim*D blocks of block-diagonal F) - * and a reduced point derivative, Enull - * and creates a reduced-rank Jacobian factor on the CameraSet + * @brief Construct a new JacobianFactorSVD object, createing a reduced-rank + * Jacobian factor on the CameraSet. * - * @Fblocks: + * @param keys keys associated with F blocks. + * @param Fblocks CameraSet derivatives, ZDim*D blocks of block-diagonal F + * @param Enull a reduced point derivative + * @param b right-hand side + * @param model noise model */ - JacobianFactorSVD(const KeyVector& keys, - const std::vector >& Fblocks, const Matrix& Enull, - const Vector& b, // - const SharedDiagonal& model = SharedDiagonal()) : - Base() { + JacobianFactorSVD( + const KeyVector& keys, + const std::vector >& Fblocks, + const Matrix& Enull, const Vector& b, + const SharedDiagonal& model = SharedDiagonal()) + : Base() { size_t numKeys = Enull.rows() / ZDim; size_t m2 = ZDim * numKeys - 3; // TODO: is this not just Enull.rows()? // PLAIN nullptr SPACE TRICK @@ -74,9 +78,8 @@ public: QF.reserve(numKeys); for (size_t k = 0; k < Fblocks.size(); ++k) { Key key = keys[k]; - QF.push_back( - KeyMatrix(key, - (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k])); + QF.emplace_back( + key, (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]); } JacobianFactor::fillTerms(QF, Enull.transpose() * b, model); } diff --git a/gtsam/slam/ProjectionFactor.h b/gtsam/slam/ProjectionFactor.h index ada304f27..42dba8bd0 100644 --- a/gtsam/slam/ProjectionFactor.h +++ b/gtsam/slam/ProjectionFactor.h @@ -11,7 +11,7 @@ /** * @file ProjectionFactor.h - * @brief Basic bearing factor from 2D measurement + * @brief Reprojection of a LANDMARK to a 2D point. * @author Chris Beall * @author Richard Roberts * @author Frank Dellaert @@ -22,17 +22,21 @@ #include #include +#include +#include #include #include namespace gtsam { /** - * Non-linear factor for a constraint derived from a 2D measurement. The calibration is known here. - * i.e. the main building block for visual SLAM. + * Non-linear factor for a constraint derived from a 2D measurement. + * The calibration is known here. + * The main building block for visual SLAM. 
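A typical construction of this factor, following the standard SFM examples (a sketch; K is a shared calibration and the keys are illustrative):

#include <boost/make_shared.hpp>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/ProjectionFactor.h>

void addProjectionFactorSketch(gtsam::NonlinearFactorGraph& graph,
                               const gtsam::Point2& measurement) {
  using namespace gtsam;
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);  // one pixel in u and v
  // Ties the camera pose x0, the landmark l0, and the 2D measurement together.
  graph.emplace_shared<GenericProjectionFactor<Pose3, Point3, Cal3_S2> >(
      measurement, noise, Symbol('x', 0), Symbol('l', 0), K);
}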
* @addtogroup SLAM */ - template + template class GenericProjectionFactor: public NoiseModelFactor2 { protected: @@ -57,9 +61,9 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; /// Default constructor - GenericProjectionFactor() : - measured_(0, 0), throwCheirality_(false), verboseCheirality_(false) { - } + GenericProjectionFactor() : + measured_(0, 0), throwCheirality_(false), verboseCheirality_(false) { + } /** * Constructor diff --git a/gtsam/slam/README.md b/gtsam/slam/README.md new file mode 100644 index 000000000..ae5edfdac --- /dev/null +++ b/gtsam/slam/README.md @@ -0,0 +1,68 @@ +# SLAM Factors + +## GenericProjectionFactor (defined in ProjectionFactor.h) + +Non-linear factor that minimizes the re-projection error with respect to a 2D measurement. +The calibration is assumed known and passed in the constructor. +The main building block for visual SLAM. + +Templated on +- `POSE`, default `Pose3` +- `LANDMARK`, default `Point3` +- `CALIBRATION`, default `Cal3_S2` + +## SmartFactors + +These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras. +While one typically adds multiple GenericProjectionFactors (one for each observation of a landmark), a SmartFactor collects all measurements for a landmark, i.e., the factor graph contains 1 smart factor per landmark. + +### SmartFactorBase + +This is the base class for smart factors, templated on a `CAMERA` type. +It has no internal point, but it saves the measurements, keeps a noise model, and an optional sensor pose. + +### SmartProjectionFactor + +Also templated on `CAMERA`. Triangulates a 3D point and keeps an estimate of it around. +This factor operates with monocular cameras, and is used to optimize the camera pose +*and* calibration for each camera, and requires variables of type `CAMERA` in values. + +If the calibration is fixed use `SmartProjectionPoseFactor` instead! + + +### SmartProjectionPoseFactor + +Derives from `SmartProjectionFactor` but is templated on a `CALIBRATION` type, setting `CAMERA = PinholePose`. +This factor assumes that the camera calibration is fixed and the same for all cameras involved in this factor. +The factor only constrains poses. + +If the calibration should be optimized, as well, use `SmartProjectionFactor` instead! + +### SmartProjectionRigFactor + +Same as `SmartProjectionPoseFactor`, except: +- it is templated on `CAMERA`, i.e., it allows cameras beyond pinhole; +- it allows measurements from multiple cameras, each camera with fixed but potentially different intrinsics and extrinsics; +- it allows multiple observations from the same pose/key, again, to model a multi-camera system. + +## Linearized Smart Factors + +The factors below are less likely to be relevant to the user, but result from using the non-linear smart factors above. + + +### RegularImplicitSchurFactor + +A specialization of a GaussianFactor to structure-less SFM, which is very fast in a conjugate gradient (CG) solver. +It is produced by calling `createRegularImplicitSchurFactor` in `SmartFactorBase` or `SmartProjectionFactor`. + +### JacobianFactorQ + +A RegularJacobianFactor that uses some badly documented reduction on the Jacobians. + +### JacobianFactorQR + +A RegularJacobianFactor that eliminates a point using sequential elimination. + +### JacobianFactorQR + +A RegularJacobianFactor that uses the "Nullspace Trick" by Mourikis et al. 
See the documentation in the file, which *is* well documented. \ No newline at end of file diff --git a/gtsam/slam/RegularImplicitSchurFactor.h b/gtsam/slam/RegularImplicitSchurFactor.h index 2ed6aa491..b4a341719 100644 --- a/gtsam/slam/RegularImplicitSchurFactor.h +++ b/gtsam/slam/RegularImplicitSchurFactor.h @@ -1,6 +1,6 @@ /** * @file RegularImplicitSchurFactor.h - * @brief A new type of linear factor (GaussianFactor), which is subclass of GaussianFactor + * @brief A subclass of GaussianFactor specialized to structureless SFM. * @author Frank Dellaert * @author Luca Carlone */ @@ -20,6 +20,20 @@ namespace gtsam { /** * RegularImplicitSchurFactor + * + * A specialization of a GaussianFactor to structure-less SFM, which is very + * fast in a conjugate gradient (CG) solver. Specifically, as measured in + * timeSchurFactors.cpp, it stays very fast for an increasing number of cameras. + * The magic is in multiplyHessianAdd, which does the Hessian-vector multiply at + * the core of CG, and implements + * y += F'*alpha*(I - E*P*E')*F*x + * where + * - F is the 2mx6m Jacobian of the m 2D measurements wrpt m 6DOF poses + * - E is the 2mx3 Jacabian of the m 2D measurements wrpt a 3D point + * - P is the covariance on the point + * The equation above implicitly executes the Schur complement by removing the + * information E*P*E' from the Hessian. It is also very fast as we do not use + * the full 6m*6m F matrix, but rather only it's m 6x6 diagonal blocks. */ template class RegularImplicitSchurFactor: public GaussianFactor { @@ -38,9 +52,10 @@ protected: static const int ZDim = traits::dimension; ///< Measurement dimension typedef Eigen::Matrix MatrixZD; ///< type of an F block - typedef Eigen::Matrix MatrixDD; ///< camera hessian + typedef Eigen::Matrix MatrixDD; ///< camera Hessian + typedef std::vector > FBlocks; - const std::vector > FBlocks_; ///< All ZDim*D F blocks (one for each camera) + FBlocks FBlocks_; ///< All ZDim*D F blocks (one for each camera) const Matrix PointCovariance_; ///< the 3*3 matrix P = inv(E'E) (2*2 if degenerate) const Matrix E_; ///< The 2m*3 E Jacobian with respect to the point const Vector b_; ///< 2m-dimensional RHS vector @@ -52,17 +67,25 @@ public: } /// Construct from blocks of F, E, inv(E'*E), and RHS vector b - RegularImplicitSchurFactor(const KeyVector& keys, - const std::vector >& FBlocks, const Matrix& E, const Matrix& P, - const Vector& b) : - GaussianFactor(keys), FBlocks_(FBlocks), PointCovariance_(P), E_(E), b_(b) { - } + + /** + * @brief Construct a new RegularImplicitSchurFactor object. + * + * @param keys keys corresponding to cameras + * @param Fs All ZDim*D F blocks (one for each camera) + * @param E Jacobian of measurements wrpt point. + * @param P point covariance matrix + * @param b RHS vector + */ + RegularImplicitSchurFactor(const KeyVector& keys, const FBlocks& Fs, + const Matrix& E, const Matrix& P, const Vector& b) + : GaussianFactor(keys), FBlocks_(Fs), PointCovariance_(P), E_(E), b_(b) {} /// Destructor ~RegularImplicitSchurFactor() override { } - std::vector >& FBlocks() const { + const FBlocks& Fs() const { return FBlocks_; } diff --git a/gtsam/slam/SmartFactorBase.h b/gtsam/slam/SmartFactorBase.h index 380283141..ddf56b289 100644 --- a/gtsam/slam/SmartFactorBase.h +++ b/gtsam/slam/SmartFactorBase.h @@ -37,12 +37,14 @@ namespace gtsam { /** - * @brief Base class for smart factors + * @brief Base class for smart factors. * This base class has no internal point, but it has a measurement, noise model * and an optional sensor pose. 
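For instance, a typical use of the pose-only smart factor described in the README above collects every observation of one landmark into a single factor on the poses (a sketch; constructor arguments follow the SFM smart-factor examples):

#include <boost/make_shared.hpp>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>

void addSmartFactorSketch(gtsam::NonlinearFactorGraph& graph) {
  using namespace gtsam;
  typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;

  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);

  auto smartFactor = boost::make_shared<SmartFactor>(noise, K);
  smartFactor->add(Point2(320.0, 240.0), Symbol('x', 0));  // observation from x0
  smartFactor->add(Point2(330.0, 250.0), Symbol('x', 1));  // observation from x1
  graph.push_back(smartFactor);  // one smart factor per landmark
}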
- * This class mainly computes the derivatives and returns them as a variety of factors. - * The methods take a Cameras argument, which should behave like PinholeCamera, and - * the value of a point, which is kept in the base class. + * This class mainly computes the derivatives and returns them as a variety of + * factors. The methods take a CameraSet argument and the value of a + * point, which is kept in the derived class. + * + * @tparam CAMERA should behave like a PinholeCamera. */ template class SmartFactorBase: public NonlinearFactor { @@ -64,19 +66,20 @@ protected: /** * As of Feb 22, 2015, the noise model is the same for all measurements and * is isotropic. This allows for moving most calculations of Schur complement - * etc to be moved to CameraSet very easily, and also agrees pragmatically + * etc. to be easily moved to CameraSet, and also agrees pragmatically * with what is normally done. */ SharedIsotropic noiseModel_; /** - * 2D measurement and noise model for each of the m views - * We keep a copy of measurements for I/O and computing the error. + * Measurements for each of the m views. + * We keep a copy of the measurements for I/O and computing the error. * The order is kept the same as the keys that we use to create the factor. */ ZVector measured_; - boost::optional body_P_sensor_; ///< Pose of the camera in the body frame + boost::optional + body_P_sensor_; ///< Pose of the camera in the body frame // Cache for Fblocks, to avoid a malloc ever time we re-linearize mutable FBlocks Fs; @@ -84,16 +87,16 @@ protected: public: GTSAM_MAKE_ALIGNED_OPERATOR_NEW - /// shorthand for a smart pointer to a factor + /// shorthand for a smart pointer to a factor. typedef boost::shared_ptr shared_ptr; - /// We use the new CameraSte data structure to refer to a set of cameras + /// The CameraSet data structure is used to refer to a set of cameras. typedef CameraSet Cameras; - /// Default Constructor, for serialization + /// Default Constructor, for serialization. SmartFactorBase() {} - /// Constructor + /// Construct with given noise model and optional arguments. SmartFactorBase(const SharedNoiseModel& sharedNoiseModel, boost::optional body_P_sensor = boost::none, size_t expectedNumberCameras = 10) @@ -111,12 +114,12 @@ protected: noiseModel_ = sharedIsotropic; } - /// Virtual destructor, subclasses from NonlinearFactor + /// Virtual destructor, subclasses from NonlinearFactor. ~SmartFactorBase() override { } /** - * Add a new measurement and pose/camera key + * Add a new measurement and pose/camera key. * @param measured is the 2m dimensional projection of a single landmark * @param key is the index corresponding to the camera observing the landmark */ @@ -129,9 +132,7 @@ protected: this->keys_.push_back(key); } - /** - * Add a bunch of measurements, together with the camera keys - */ + /// Add a bunch of measurements, together with the camera keys. void add(const ZVector& measurements, const KeyVector& cameraKeys) { assert(measurements.size() == cameraKeys.size()); for (size_t i = 0; i < measurements.size(); i++) { @@ -140,8 +141,8 @@ protected: } /** - * Adds an entire SfM_track (collection of cameras observing a single point). - * The noise is assumed to be the same for all measurements + * Add an entire SfM_track (collection of cameras observing a single point). + * The noise is assumed to be the same for all measurements. */ template void add(const SFM_TRACK& trackToAdd) { @@ -151,17 +152,13 @@ protected: } } - /// get the dimension (number of rows!) 
of the factor - size_t dim() const override { - return ZDim * this->measured_.size(); - } + /// Return the dimension (number of rows!) of the factor. + size_t dim() const override { return ZDim * this->measured_.size(); } - /** return the measurements */ - const ZVector& measured() const { - return measured_; - } + /// Return the 2D measurements (ZDim, in general). + const ZVector& measured() const { return measured_; } - /// Collect all cameras: important that in key order + /// Collect all cameras: important that in key order. virtual Cameras cameras(const Values& values) const { Cameras cameras; for(const Key& k: this->keys_) @@ -188,25 +185,30 @@ protected: /// equals bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { - const This *e = dynamic_cast(&p); - - bool areMeasurementsEqual = true; - for (size_t i = 0; i < measured_.size(); i++) { - if (traits::Equals(this->measured_.at(i), e->measured_.at(i), tol) == false) - areMeasurementsEqual = false; - break; + if (const This* e = dynamic_cast(&p)) { + // Check that all measurements are the same. + for (size_t i = 0; i < measured_.size(); i++) { + if (!traits::Equals(this->measured_.at(i), e->measured_.at(i), tol)) + return false; + } + // If so, check base class. + return Base::equals(p, tol); + } else { + return false; } - return e && Base::equals(p, tol) && areMeasurementsEqual; } /// Compute reprojection errors [h(x)-z] = [cameras.project(p)-z] and - /// derivatives + /// derivatives. This is the error before the noise model is applied. template Vector unwhitenedError( const Cameras& cameras, const POINT& point, boost::optional Fs = boost::none, // boost::optional E = boost::none) const { - Vector ue = cameras.reprojectionError(point, measured_, Fs, E); + // Reproject, with optional derivatives. + Vector error = cameras.reprojectionError(point, measured_, Fs, E); + + // Apply chain rule if body_P_sensor_ is given. if (body_P_sensor_ && Fs) { const Pose3 sensor_P_body = body_P_sensor_->inverse(); constexpr int camera_dim = traits::dimension; @@ -224,52 +226,60 @@ protected: Fs->at(i) = Fs->at(i) * J; } } - correctForMissingMeasurements(cameras, ue, Fs, E); - return ue; + + // Correct the Jacobians in case some measurements are missing. + correctForMissingMeasurements(cameras, error, Fs, E); + + return error; } /** - * This corrects the Jacobians for the case in which some pixel measurement is missing (nan) - * In practice, this does not do anything in the monocular case, but it is implemented in the stereo version + * This corrects the Jacobians for the case in which some 2D measurement is + * missing (nan). In practice, this does not do anything in the monocular + * case, but it is implemented in the stereo version. */ - virtual void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, boost::optional Fs = boost::none, - boost::optional E = boost::none) const {} + virtual void correctForMissingMeasurements( + const Cameras& cameras, Vector& ue, + boost::optional Fs = boost::none, + boost::optional E = boost::none) const {} /** - * Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) - z] - * Noise model applied + * Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) - + * z], with the noise model applied. 
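Numerically, the whitening convention reads as follows (a small sketch): with an isotropic sigma of 0.5 pixels, a raw residual e is scaled to e/sigma, and the factor error is 0.5*|e/sigma|^2.

#include <gtsam/linear/NoiseModel.h>

void whitenedErrorSketch() {
  using namespace gtsam;
  auto model = noiseModel::Isotropic::Sigma(2, 0.5);
  Vector e = (Vector(2) << 1.0, 2.0).finished();  // raw reprojection error
  Vector w = model->whiten(e);                    // (2, 4)
  double error = 0.5 * w.dot(w);                  // 0.5 * (4 + 16) = 10
  (void)error;
}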
*/ template Vector whitenedError(const Cameras& cameras, const POINT& point) const { - Vector e = cameras.reprojectionError(point, measured_); + Vector error = cameras.reprojectionError(point, measured_); if (noiseModel_) - noiseModel_->whitenInPlace(e); - return e; + noiseModel_->whitenInPlace(error); + return error; } - /** Calculate the error of the factor. - * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian. - * In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model - * to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5. - * Will be used in "error(Values)" function required by NonlinearFactor base class + /** + * Calculate the error of the factor. + * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of + * Gaussian. In this class, we take the raw prediction error \f$ h(x)-z \f$, + * ask the noise model to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and + * then multiply by 0.5. Will be used in "error(Values)" function required by + * NonlinearFactor base class */ template double totalReprojectionError(const Cameras& cameras, const POINT& point) const { - Vector e = whitenedError(cameras, point); - return 0.5 * e.dot(e); + Vector error = whitenedError(cameras, point); + return 0.5 * error.dot(error); } - /// Computes Point Covariance P from E - static Matrix PointCov(Matrix& E) { + /// Computes Point Covariance P from the "point Jacobian" E. + static Matrix PointCov(const Matrix& E) { return (E.transpose() * E).inverse(); } /** - * Compute F, E, and b (called below in both vanilla and SVD versions), where - * F is a vector of derivatives wrpt the cameras, and E the stacked derivatives - * with respect to the point. The value of cameras/point are passed as parameters. - * TODO: Kill this obsolete method + * Compute F, E, and b (called below in both vanilla and SVD versions), where + * F is a vector of derivatives wrpt the cameras, and E the stacked + * derivatives with respect to the point. The value of cameras/point are + * passed as parameters. */ template void computeJacobians(FBlocks& Fs, Matrix& E, Vector& b, @@ -281,7 +291,11 @@ protected: b = -unwhitenedError(cameras, point, Fs, E); } - /// SVD version + /** + * SVD version that produces smaller Jacobian matrices by doing an SVD + * decomposition on E, and returning the left nulkl-space of E. + * See JacobianFactorSVD for more documentation. + * */ template void computeJacobiansSVD(FBlocks& Fs, Matrix& Enull, Vector& b, const Cameras& cameras, const POINT& point) const { @@ -291,14 +305,14 @@ protected: static const int N = FixedDimension::value; // 2 (Unit3) or 3 (Point3) - // Do SVD on A + // Do SVD on A. Eigen::JacobiSVD svd(E, Eigen::ComputeFullU); - Vector s = svd.singularValues(); size_t m = this->keys_.size(); Enull = svd.matrixU().block(0, N, ZDim * m, ZDim * m - N); // last ZDim*m-N columns } - /// Linearize to a Hessianfactor + /// Linearize to a Hessianfactor. + // TODO(dellaert): Not used/tested anywhere and not properly whitened. boost::shared_ptr > createHessianFactor( const Cameras& cameras, const Point3& point, const double lambda = 0.0, bool diagonalDamping = false) const { @@ -351,9 +365,7 @@ protected: P, b); } - /** - * Return Jacobians as JacobianFactorQ - */ + /// Return Jacobians as JacobianFactorQ. 
boost::shared_ptr > createJacobianQFactor( const Cameras& cameras, const Point3& point, double lambda = 0.0, bool diagonalDamping = false) const { @@ -368,8 +380,8 @@ protected: } /** - * Return Jacobians as JacobianFactorSVD - * TODO lambda is currently ignored + * Return Jacobians as JacobianFactorSVD. + * TODO(dellaert): lambda is currently ignored */ boost::shared_ptr createJacobianSVDFactor( const Cameras& cameras, const Point3& point, double lambda = 0.0) const { @@ -393,7 +405,7 @@ protected: F.block(ZDim * i, Dim * i) = Fs.at(i); } - + // Return sensor pose. Pose3 body_P_sensor() const{ if(body_P_sensor_) return *body_P_sensor_; diff --git a/gtsam/slam/SmartProjectionFactor.h b/gtsam/slam/SmartProjectionFactor.h index f67ca0740..f9c101cb8 100644 --- a/gtsam/slam/SmartProjectionFactor.h +++ b/gtsam/slam/SmartProjectionFactor.h @@ -61,15 +61,17 @@ protected: /// @name Caching triangulation /// @{ mutable TriangulationResult result_; ///< result from triangulateSafe - mutable std::vector > cameraPosesTriangulation_; ///< current triangulation poses + mutable std::vector > + cameraPosesTriangulation_; ///< current triangulation poses /// @} -public: + public: /// shorthand for a smart pointer to a factor typedef boost::shared_ptr shared_ptr; /// shorthand for a set of cameras + typedef CAMERA Camera; typedef CameraSet Cameras; /** @@ -116,21 +118,31 @@ public: && Base::equals(p, tol); } - /// Check if the new linearization point is the same as the one used for previous triangulation + /** + * @brief Check if the new linearization point is the same as the one used for + * previous triangulation. + * + * @param cameras + * @return true if we need to re-triangulate. + */ bool decideIfTriangulate(const Cameras& cameras) const { - // several calls to linearize will be done from the same linearization point, hence it is not needed to re-triangulate - // Note that this is not yet "selecting linearization", that will come later, and we only check if the - // current linearization is the "same" (up to tolerance) w.r.t. the last time we triangulated the point + // Several calls to linearize will be done from the same linearization + // point, hence it is not needed to re-triangulate. Note that this is not + // yet "selecting linearization", that will come later, and we only check if + // the current linearization is the "same" (up to tolerance) w.r.t. the last + // time we triangulated the point. size_t m = cameras.size(); bool retriangulate = false; - // if we do not have a previous linearization point or the new linearization point includes more poses + // Definitely true if we do not have a previous linearization point or the + // new linearization point includes more poses. if (cameraPosesTriangulation_.empty() || cameras.size() != cameraPosesTriangulation_.size()) retriangulate = true; + // Otherwise, check poses against cache. if (!retriangulate) { for (size_t i = 0; i < cameras.size(); i++) { if (!cameras[i].pose().equals(cameraPosesTriangulation_[i], @@ -141,7 +153,8 @@ public: } } - if (retriangulate) { // we store the current poses used for triangulation + // Store the current poses used for triangulation if we will re-triangulate. 
+ if (retriangulate) { cameraPosesTriangulation_.clear(); cameraPosesTriangulation_.reserve(m); for (size_t i = 0; i < m; i++) @@ -149,10 +162,15 @@ public: cameraPosesTriangulation_.push_back(cameras[i].pose()); } - return retriangulate; // if we arrive to this point all poses are the same and we don't need re-triangulation + return retriangulate; } - /// triangulateSafe + /** + * @brief Call gtsam::triangulateSafe iff we need to re-triangulate. + * + * @param cameras + * @return TriangulationResult + */ TriangulationResult triangulateSafe(const Cameras& cameras) const { size_t m = cameras.size(); @@ -166,17 +184,21 @@ public: return result_; } - /// triangulate + /** + * @brief Possibly re-triangulate before calculating Jacobians. + * + * @param cameras + * @return true if we could safely triangulate + */ bool triangulateForLinearize(const Cameras& cameras) const { triangulateSafe(cameras); // imperative, might reset result_ return bool(result_); } - /// linearize returns a Hessianfactor that is an approximation of error(p) + /// Create a Hessianfactor that is an approximation of error(p). boost::shared_ptr > createHessianFactor( - const Cameras& cameras, const double lambda = 0.0, bool diagonalDamping = - false) const { - + const Cameras& cameras, const double lambda = 0.0, + bool diagonalDamping = false) const { size_t numKeys = this->keys_.size(); // Create structures for Hessian Factors KeyVector js; @@ -184,39 +206,38 @@ public: std::vector gs(numKeys); if (this->measured_.size() != cameras.size()) - throw std::runtime_error("SmartProjectionHessianFactor: this->measured_" - ".size() inconsistent with input"); + throw std::runtime_error( + "SmartProjectionHessianFactor: this->measured_" + ".size() inconsistent with input"); triangulateSafe(cameras); if (params_.degeneracyMode == ZERO_ON_DEGENERACY && !result_) { // failed: return"empty" Hessian - for(Matrix& m: Gs) - m = Matrix::Zero(Base::Dim, Base::Dim); - for(Vector& v: gs) - v = Vector::Zero(Base::Dim); + for (Matrix& m : Gs) m = Matrix::Zero(Base::Dim, Base::Dim); + for (Vector& v : gs) v = Vector::Zero(Base::Dim); return boost::make_shared >(this->keys_, - Gs, gs, 0.0); + Gs, gs, 0.0); } // Jacobian could be 3D Point3 OR 2D Unit3, difference is E.cols(). - std::vector > Fblocks; + typename Base::FBlocks Fs; Matrix E; Vector b; - computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras); + computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras); // Whiten using noise model - Base::whitenJacobians(Fblocks, E, b); + Base::whitenJacobians(Fs, E, b); // build augmented hessian - SymmetricBlockMatrix augmentedHessian = // - Cameras::SchurComplement(Fblocks, E, b, lambda, diagonalDamping); + SymmetricBlockMatrix augmentedHessian = // + Cameras::SchurComplement(Fs, E, b, lambda, diagonalDamping); - return boost::make_shared >(this->keys_, - augmentedHessian); + return boost::make_shared >( + this->keys_, augmentedHessian); } - // create factor + // Create RegularImplicitSchurFactor factor. boost::shared_ptr > createRegularImplicitSchurFactor( const Cameras& cameras, double lambda) const { if (triangulateForLinearize(cameras)) @@ -226,7 +247,7 @@ public: return boost::shared_ptr >(); } - /// create factor + /// Create JacobianFactorQ factor. boost::shared_ptr > createJacobianQFactor( const Cameras& cameras, double lambda) const { if (triangulateForLinearize(cameras)) @@ -236,13 +257,13 @@ public: return boost::make_shared >(this->keys_); } - /// Create a factor, takes values + /// Create JacobianFactorQ factor, takes values. 
boost::shared_ptr > createJacobianQFactor( const Values& values, double lambda) const { return createJacobianQFactor(this->cameras(values), lambda); } - /// different (faster) way to compute Jacobian factor + /// Different (faster) way to compute a JacobianFactorSVD factor. boost::shared_ptr createJacobianSVDFactor( const Cameras& cameras, double lambda) const { if (triangulateForLinearize(cameras)) @@ -252,19 +273,19 @@ public: return boost::make_shared >(this->keys_); } - /// linearize to a Hessianfactor + /// Linearize to a Hessianfactor. virtual boost::shared_ptr > linearizeToHessian( const Values& values, double lambda = 0.0) const { return createHessianFactor(this->cameras(values), lambda); } - /// linearize to an Implicit Schur factor + /// Linearize to an Implicit Schur factor. virtual boost::shared_ptr > linearizeToImplicit( const Values& values, double lambda = 0.0) const { return createRegularImplicitSchurFactor(this->cameras(values), lambda); } - /// linearize to a JacobianfactorQ + /// Linearize to a JacobianfactorQ. virtual boost::shared_ptr > linearizeToJacobian( const Values& values, double lambda = 0.0) const { return createJacobianQFactor(this->cameras(values), lambda); @@ -334,7 +355,7 @@ public: /// Assumes the point has been computed /// Note E can be 2m*3 or 2m*2, in case point is degenerate void computeJacobiansWithTriangulatedPoint( - std::vector >& Fblocks, Matrix& E, Vector& b, + typename Base::FBlocks& Fs, Matrix& E, Vector& b, const Cameras& cameras) const { if (!result_) { @@ -342,32 +363,32 @@ public: // TODO check flag whether we should do this Unit3 backProjected = cameras[0].backprojectPointAtInfinity( this->measured_.at(0)); - Base::computeJacobians(Fblocks, E, b, cameras, backProjected); + Base::computeJacobians(Fs, E, b, cameras, backProjected); } else { // valid result: just return Base version - Base::computeJacobians(Fblocks, E, b, cameras, *result_); + Base::computeJacobians(Fs, E, b, cameras, *result_); } } /// Version that takes values, and creates the point bool triangulateAndComputeJacobians( - std::vector >& Fblocks, Matrix& E, Vector& b, + typename Base::FBlocks& Fs, Matrix& E, Vector& b, const Values& values) const { Cameras cameras = this->cameras(values); bool nonDegenerate = triangulateForLinearize(cameras); if (nonDegenerate) - computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras); + computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras); return nonDegenerate; } /// takes values bool triangulateAndComputeJacobiansSVD( - std::vector >& Fblocks, Matrix& Enull, Vector& b, + typename Base::FBlocks& Fs, Matrix& Enull, Vector& b, const Values& values) const { Cameras cameras = this->cameras(values); bool nonDegenerate = triangulateForLinearize(cameras); if (nonDegenerate) - Base::computeJacobiansSVD(Fblocks, Enull, b, cameras, *result_); + Base::computeJacobiansSVD(Fs, Enull, b, cameras, *result_); return nonDegenerate; } diff --git a/gtsam/slam/SmartProjectionRigFactor.h b/gtsam/slam/SmartProjectionRigFactor.h new file mode 100644 index 000000000..149c12928 --- /dev/null +++ b/gtsam/slam/SmartProjectionRigFactor.h @@ -0,0 +1,372 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file SmartProjectionRigFactor.h + * @brief Smart factor on poses, assuming camera calibration is fixed. + * Same as SmartProjectionPoseFactor, except: + * - it is templated on CAMERA (i.e., it allows cameras beyond pinhole) + * - it admits a different calibration for each measurement (i.e., it + * can model a multi-camera rig system) + * - it allows multiple observations from the same pose/key (again, to + * model a multi-camera system) + * @author Luca Carlone + * @author Frank Dellaert + */ + +#pragma once + +#include + +namespace gtsam { +/** + * + * @addtogroup SLAM + * + * If you are using the factor, please cite: + * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating + * conditionally independent sets in factor graphs: a unifying perspective based + * on smart factors, Int. Conf. on Robotics and Automation (ICRA), 2014. + */ + +/** + * This factor assumes that camera calibration is fixed (but each measurement + * can be taken by a different camera in the rig, hence can have a different + * extrinsic and intrinsic calibration). The factor only constrains poses + * (variable dimension is 6 for each pose). This factor requires that values + * contains the involved poses (Pose3). If all measurements share the same + * calibration (i.e., are from the same camera), use SmartProjectionPoseFactor + * instead! If the calibration should be optimized, as well, use + * SmartProjectionFactor instead! + * @addtogroup SLAM + */ +template +class SmartProjectionRigFactor : public SmartProjectionFactor { + private: + typedef SmartProjectionFactor Base; + typedef SmartProjectionRigFactor This; + typedef typename CAMERA::CalibrationType CALIBRATION; + typedef typename CAMERA::Measurement MEASUREMENT; + typedef typename CAMERA::MeasurementVector MEASUREMENTS; + + static const int DimPose = 6; ///< Pose3 dimension + static const int ZDim = 2; ///< Measurement dimension + + protected: + /// vector of keys (one for each observation) with potentially repeated keys + KeyVector nonUniqueKeys_; + + /// cameras in the rig (fixed poses wrt body and intrinsics, for each camera) + boost::shared_ptr cameraRig_; + + /// vector of camera Ids (one for each observation, in the same order), + /// identifying which camera took the measurement + FastVector cameraIds_; + + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + typedef CAMERA Camera; + typedef CameraSet Cameras; + + /// shorthand for a smart pointer to a factor + typedef boost::shared_ptr shared_ptr; + + /// Default constructor, only for serialization + SmartProjectionRigFactor() {} + + /** + * Constructor + * @param sharedNoiseModel isotropic noise model for the 2D feature + * measurements + * @param cameraRig set of cameras (fixed poses wrt body and intrinsics) in + * the camera rig + * @param params parameters for the smart projection factors + */ + SmartProjectionRigFactor( + const SharedNoiseModel& sharedNoiseModel, + const boost::shared_ptr& cameraRig, + const SmartProjectionParams& params = SmartProjectionParams()) + : Base(sharedNoiseModel, params), cameraRig_(cameraRig) { + // throw exception if configuration is not supported by this factor + if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY) + throw std::runtime_error( + "SmartProjectionRigFactor: " + "degeneracyMode must be set to ZERO_ON_DEGENERACY"); + if (Base::params_.linearizationMode != 
gtsam::HESSIAN) + throw std::runtime_error( + "SmartProjectionRigFactor: " + "linearizationMode must be set to HESSIAN"); + } + + /** Virtual destructor */ + ~SmartProjectionRigFactor() override = default; + + /** + * add a new measurement, corresponding to an observation from pose "poseKey" + * and taken from the camera in the rig identified by "cameraId" + * @param measured 2-dimensional location of the projection of a + * single landmark in a single view (the measurement) + * @param poseKey key corresponding to the body pose of the camera taking the + * measurement + * @param cameraId ID of the camera in the rig taking the measurement (default + * 0) + */ + void add(const MEASUREMENT& measured, const Key& poseKey, + const size_t& cameraId = 0) { + // store measurement and key + this->measured_.push_back(measured); + this->nonUniqueKeys_.push_back(poseKey); + + // also store keys in the keys_ vector: these keys are assumed to be + // unique, so we avoid duplicates here + if (std::find(this->keys_.begin(), this->keys_.end(), poseKey) == + this->keys_.end()) + this->keys_.push_back(poseKey); // add only unique keys + + // store id of the camera taking the measurement + cameraIds_.push_back(cameraId); + } + + /** + * Variant of the previous "add" function in which we include multiple + * measurements + * @param measurements vector of the 2m dimensional location of the projection + * of a single landmark in the m views (the measurements) + * @param poseKeys keys corresponding to the body poses of the cameras taking + * the measurements + * @param cameraIds IDs of the cameras in the rig taking each measurement + * (same order as the measurements) + */ + void add(const MEASUREMENTS& measurements, const KeyVector& poseKeys, + const FastVector& cameraIds = FastVector()) { + if (poseKeys.size() != measurements.size() || + (poseKeys.size() != cameraIds.size() && cameraIds.size() != 0)) { + throw std::runtime_error( + "SmartProjectionRigFactor: " + "trying to add inconsistent inputs"); + } + if (cameraIds.size() == 0 && cameraRig_->size() > 1) { + throw std::runtime_error( + "SmartProjectionRigFactor: " + "camera rig includes multiple camera " + "but add did not input cameraIds"); + } + for (size_t i = 0; i < measurements.size(); i++) { + add(measurements[i], poseKeys[i], + cameraIds.size() == 0 ? 
0 : cameraIds[i]); + } + } + + /// return (for each observation) the (possibly non unique) keys involved in + /// the measurements + const KeyVector& nonUniqueKeys() const { return nonUniqueKeys_; } + + /// return the calibration object + const boost::shared_ptr& cameraRig() const { return cameraRig_; } + + /// return the calibration object + const FastVector& cameraIds() const { return cameraIds_; } + + /** + * print + * @param s optional string naming the factor + * @param keyFormatter optional formatter useful for printing Symbols + */ + void print( + const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { + std::cout << s << "SmartProjectionRigFactor: \n "; + for (size_t i = 0; i < nonUniqueKeys_.size(); i++) { + std::cout << "-- Measurement nr " << i << std::endl; + std::cout << "key: " << keyFormatter(nonUniqueKeys_[i]) << std::endl; + std::cout << "cameraId: " << cameraIds_[i] << std::endl; + (*cameraRig_)[cameraIds_[i]].print("camera in rig:\n"); + } + Base::print("", keyFormatter); + } + + /// equals + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { + const This* e = dynamic_cast(&p); + return e && Base::equals(p, tol) && nonUniqueKeys_ == e->nonUniqueKeys() && + cameraRig_->equals(*(e->cameraRig())) && + std::equal(cameraIds_.begin(), cameraIds_.end(), + e->cameraIds().begin()); + } + + /** + * Collect all cameras involved in this factor + * @param values Values structure which must contain body poses corresponding + * to keys involved in this factor + * @return vector of cameras + */ + typename Base::Cameras cameras(const Values& values) const override { + typename Base::Cameras cameras; + cameras.reserve(nonUniqueKeys_.size()); // preallocate + for (size_t i = 0; i < nonUniqueKeys_.size(); i++) { + const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]]; + const Pose3 world_P_sensor_i = + values.at(nonUniqueKeys_[i]) // = world_P_body + * camera_i.pose(); // = body_P_cam_i + cameras.emplace_back(world_P_sensor_i, + make_shared( + camera_i.calibration())); + } + return cameras; + } + + /** + * error calculates the error of the factor. + */ + double error(const Values& values) const override { + if (this->active(values)) { + return this->totalReprojectionError(this->cameras(values)); + } else { // else of active flag + return 0.0; + } + } + + /** + * Compute jacobian F, E and error vector at a given linearization point + * @param values Values structure which must contain camera poses + * corresponding to keys involved in this factor + * @return Return arguments are the camera jacobians Fs (including the + * jacobian with respect to both body poses we interpolate from), the point + * Jacobian E, and the error vector b. Note that the jacobians are computed + * for a given point. 
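A construction sketch for this factor: a two-camera rig observing the same landmark from one body pose, using the HESSIAN / ZERO_ON_DEGENERACY settings the constructor requires (the SmartProjectionParams argument order is assumed to be linearization mode first, then degeneracy mode):

#include <boost/make_shared.hpp>
#include <gtsam/geometry/PinholePose.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/slam/SmartProjectionRigFactor.h>

void rigFactorSketch() {
  using namespace gtsam;
  typedef PinholePose<Cal3_S2> Camera;

  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto rig = boost::make_shared<CameraSet<Camera> >();
  rig->push_back(Camera(Pose3(), K));                               // cameraId 0
  rig->push_back(Camera(Pose3(Rot3(), Point3(0.1, 0.0, 0.0)), K));  // cameraId 1

  SmartProjectionParams params(HESSIAN, ZERO_ON_DEGENERACY);
  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);

  SmartProjectionRigFactor<Camera> factor(noise, rig, params);
  factor.add(Point2(320.0, 240.0), Symbol('x', 0), 0);  // camera 0 at pose x0
  factor.add(Point2(315.0, 240.0), Symbol('x', 0), 1);  // camera 1, same pose
}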
+ */ + void computeJacobiansWithTriangulatedPoint(typename Base::FBlocks& Fs, + Matrix& E, Vector& b, + const Cameras& cameras) const { + if (!this->result_) { + throw("computeJacobiansWithTriangulatedPoint"); + } else { // valid result: compute jacobians + b = -cameras.reprojectionError(*this->result_, this->measured_, Fs, E); + for (size_t i = 0; i < Fs.size(); i++) { + const Pose3& body_P_sensor = (*cameraRig_)[cameraIds_[i]].pose(); + const Pose3 world_P_body = cameras[i].pose() * body_P_sensor.inverse(); + Eigen::Matrix H; + world_P_body.compose(body_P_sensor, H); + Fs.at(i) = Fs.at(i) * H; + } + } + } + + /// linearize and return a Hessianfactor that is an approximation of error(p) + boost::shared_ptr > createHessianFactor( + const Values& values, const double& lambda = 0.0, + bool diagonalDamping = false) const { + // we may have multiple observation sharing the same keys (e.g., 2 cameras + // measuring from the same body pose), hence the number of unique keys may + // be smaller than nrMeasurements + size_t nrUniqueKeys = + this->keys_ + .size(); // note: by construction, keys_ only contains unique keys + + Cameras cameras = this->cameras(values); + + // Create structures for Hessian Factors + std::vector js; + std::vector Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2); + std::vector gs(nrUniqueKeys); + + if (this->measured_.size() != cameras.size()) // 1 observation per camera + throw std::runtime_error( + "SmartProjectionRigFactor: " + "measured_.size() inconsistent with input"); + + // triangulate 3D point at given linearization point + this->triangulateSafe(cameras); + + if (!this->result_) { // failed: return "empty/zero" Hessian + if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) { + for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose); + for (Vector& v : gs) v = Vector::Zero(DimPose); + return boost::make_shared >(this->keys_, + Gs, gs, 0.0); + } else { + throw std::runtime_error( + "SmartProjectionRigFactor: " + "only supported degeneracy mode is ZERO_ON_DEGENERACY"); + } + } + + // compute Jacobian given triangulated 3D Point + typename Base::FBlocks Fs; + Matrix E; + Vector b; + this->computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras); + + // Whiten using noise model + this->noiseModel_->WhitenSystem(E, b); + for (size_t i = 0; i < Fs.size(); i++) { + Fs[i] = this->noiseModel_->Whiten(Fs[i]); + } + + const Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping); + + // Build augmented Hessian (with last row/column being the information + // vector) Note: we need to get the augumented hessian wrt the unique keys + // in key_ + SymmetricBlockMatrix augmentedHessianUniqueKeys = + Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 6, 6>( + Fs, E, P, b, nonUniqueKeys_, this->keys_); + + return boost::make_shared >( + this->keys_, augmentedHessianUniqueKeys); + } + + /** + * Linearize to Gaussian Factor (possibly adding a damping factor Lambda for + * LM) + * @param values Values structure which must contain camera poses and + * extrinsic pose for this factor + * @return a Gaussian factor + */ + boost::shared_ptr linearizeDamped( + const Values& values, const double& lambda = 0.0) const { + // depending on flag set on construction we may linearize to different + // linear factors + switch (this->params_.linearizationMode) { + case HESSIAN: + return this->createHessianFactor(values, lambda); + default: + throw std::runtime_error( + "SmartProjectionRigFactor: unknown linearization mode"); + } + } + + /// linearize + boost::shared_ptr linearize( + const 
Values& values) const override { + return this->linearizeDamped(values); + } + + private: + /// Serialization function + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + // ar& BOOST_SERIALIZATION_NVP(nonUniqueKeys_); + // ar& BOOST_SERIALIZATION_NVP(cameraRig_); + // ar& BOOST_SERIALIZATION_NVP(cameraIds_); + } +}; +// end of class declaration + +/// traits +template +struct traits > + : public Testable > {}; + +} // namespace gtsam diff --git a/gtsam/slam/TriangulationFactor.h b/gtsam/slam/TriangulationFactor.h index f12053d29..0a15d6861 100644 --- a/gtsam/slam/TriangulationFactor.h +++ b/gtsam/slam/TriangulationFactor.h @@ -129,7 +129,7 @@ public: << std::endl; if (throwCheirality_) throw e; - return Eigen::Matrix::dimension,1>::Constant(2.0 * camera_.calibration().fx()); + return camera_.defaultErrorWhenTriangulatingBehindCamera(); } } diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index c8a8b15c5..d7e925bd9 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -392,7 +392,7 @@ parseMeasurements(const std::string &filename, size_t maxIndex) { ParseMeasurement parse{model ? createSampler(model) : nullptr, maxIndex, true, NoiseFormatAUTO, - KernelFunctionTypeNONE}; + KernelFunctionTypeNONE, nullptr}; return parseToVector>(filename, parse); } diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h index c6aa02774..3b8ea86d3 100644 --- a/gtsam/slam/expressions.h +++ b/gtsam/slam/expressions.h @@ -138,4 +138,21 @@ Point2_ uncalibrate(const Expression& K, const Point2_& xy_hat) { return Point2_(K, &CALIBRATION::uncalibrate, xy_hat); } + +/// logmap +// TODO(dellaert): Should work but fails because of a type deduction conflict. 
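+// A minimal usage sketch for the working overload defined at the end of this
+// block (Pose3_ and Vector6_ are the usual Expression typedefs; the resulting
+// expression evaluates to Logmap(between(x1, x2)) and can be wrapped in an
+// ExpressionFactor if desired):
+//
+//   Pose3_ T1_(0), T2_(1);
+//   Vector6_ delta_ = logmap(T1_, T2_);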
+// template +// gtsam::Expression::TangentVector> logmap( +// const gtsam::Expression &x1, const gtsam::Expression &x2) { +// return gtsam::Expression::TangentVector>( +// x1, &T::logmap, x2); +// } + +template +gtsam::Expression::TangentVector> logmap( + const gtsam::Expression &x1, const gtsam::Expression &x2) { + return Expression::TangentVector>( + gtsam::traits::Logmap, between(x1, x2)); +} + } // \namespace gtsam diff --git a/gtsam/slam/lago.cpp b/gtsam/slam/lago.cpp index 70caa424f..f8b092f86 100644 --- a/gtsam/slam/lago.cpp +++ b/gtsam/slam/lago.cpp @@ -36,7 +36,7 @@ static const Matrix I = I_1x1; static const Matrix I3 = I_3x3; static const noiseModel::Diagonal::shared_ptr priorOrientationNoise = - noiseModel::Diagonal::Sigmas((Vector(1) << 0).finished()); + noiseModel::Diagonal::Sigmas(Vector1(0)); static const noiseModel::Diagonal::shared_ptr priorPose2Noise = noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8)); diff --git a/gtsam/slam/slam.i b/gtsam/slam/slam.i index 1c04fd14c..527e838b2 100644 --- a/gtsam/slam/slam.i +++ b/gtsam/slam/slam.i @@ -168,6 +168,13 @@ template virtual class PoseTranslationPrior : gtsam::NoiseModelFactor { PoseTranslationPrior(size_t key, const POSE& pose_z, const gtsam::noiseModel::Base* noiseModel); + POSE measured() const; + + // enabling serialization functionality + void serialize() const; + + // enable pickling in python + void pickle() const; }; typedef gtsam::PoseTranslationPrior PoseTranslationPrior2D; @@ -178,6 +185,7 @@ template virtual class PoseRotationPrior : gtsam::NoiseModelFactor { PoseRotationPrior(size_t key, const POSE& pose_z, const gtsam::noiseModel::Base* noiseModel); + POSE measured() const; }; typedef gtsam::PoseRotationPrior PoseRotationPrior2D; @@ -334,5 +342,11 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { Vector evaluateError(const T& R1, const T& R2); }; - + +#include +namespace lago { + gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, bool useOdometricPath = true); + gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialGuess); +} + } // namespace gtsam diff --git a/gtsam/slam/tests/smartFactorScenarios.h b/gtsam/slam/tests/smartFactorScenarios.h index 4abc59305..310dbe79e 100644 --- a/gtsam/slam/tests/smartFactorScenarios.h +++ b/gtsam/slam/tests/smartFactorScenarios.h @@ -17,11 +17,14 @@ */ #pragma once -#include -#include -#include -#include #include +#include +#include +#include +#include +#include + +#include "../SmartProjectionRigFactor.h" using namespace std; using namespace gtsam; @@ -43,7 +46,7 @@ Pose3 pose_above = level_pose * Pose3(Rot3(), Point3(0, -1, 0)); // Create a noise unit2 for the pixel error static SharedNoiseModel unit2(noiseModel::Unit::Create(2)); -static double fov = 60; // degrees +static double fov = 60; // degrees static size_t w = 640, h = 480; /* ************************************************************************* */ @@ -62,38 +65,43 @@ Camera cam2(pose_right, K2); Camera cam3(pose_above, K2); typedef GeneralSFMFactor SFMFactor; SmartProjectionParams params; -} +} // namespace vanilla /* ************************************************************************* */ // default Cal3_S2 poses namespace vanillaPose { typedef PinholePose Camera; +typedef CameraSet Cameras; typedef SmartProjectionPoseFactor SmartFactor; +typedef SmartProjectionRigFactor SmartRigFactor; static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h)); Camera level_camera(level_pose, sharedK); Camera level_camera_right(pose_right, 
sharedK); Camera cam1(level_pose, sharedK); Camera cam2(pose_right, sharedK); Camera cam3(pose_above, sharedK); -} +} // namespace vanillaPose /* ************************************************************************* */ // default Cal3_S2 poses namespace vanillaPose2 { typedef PinholePose Camera; +typedef CameraSet Cameras; typedef SmartProjectionPoseFactor SmartFactor; +typedef SmartProjectionRigFactor SmartRigFactor; static Cal3_S2::shared_ptr sharedK2(new Cal3_S2(1500, 1200, 0, 640, 480)); Camera level_camera(level_pose, sharedK2); Camera level_camera_right(pose_right, sharedK2); Camera cam1(level_pose, sharedK2); Camera cam2(pose_right, sharedK2); Camera cam3(pose_above, sharedK2); -} +} // namespace vanillaPose2 /* *************************************************************************/ // Cal3Bundler cameras namespace bundler { typedef PinholeCamera Camera; +typedef CameraSet Cameras; typedef SmartProjectionFactor SmartFactor; static Cal3Bundler K(500, 1e-3, 1e-3, 0, 0); Camera level_camera(level_pose, K); @@ -105,41 +113,59 @@ Camera cam1(level_pose, K); Camera cam2(pose_right, K); Camera cam3(pose_above, K); typedef GeneralSFMFactor SFMFactor; -} +} // namespace bundler + /* *************************************************************************/ // Cal3Bundler poses namespace bundlerPose { typedef PinholePose Camera; +typedef CameraSet Cameras; typedef SmartProjectionPoseFactor SmartFactor; -static boost::shared_ptr sharedBundlerK( - new Cal3Bundler(500, 1e-3, 1e-3, 1000, 2000)); +typedef SmartProjectionRigFactor SmartRigFactor; +static boost::shared_ptr sharedBundlerK(new Cal3Bundler(500, 1e-3, + 1e-3, 1000, + 2000)); Camera level_camera(level_pose, sharedBundlerK); Camera level_camera_right(pose_right, sharedBundlerK); Camera cam1(level_pose, sharedBundlerK); Camera cam2(pose_right, sharedBundlerK); Camera cam3(pose_above, sharedBundlerK); -} +} // namespace bundlerPose + +/* ************************************************************************* */ +// sphericalCamera +namespace sphericalCamera { +typedef SphericalCamera Camera; +typedef CameraSet Cameras; +typedef SmartProjectionRigFactor SmartFactorP; +static EmptyCal::shared_ptr emptyK; +Camera level_camera(level_pose); +Camera level_camera_right(pose_right); +Camera cam1(level_pose); +Camera cam2(pose_right); +Camera cam3(pose_above); +} // namespace sphericalCamera /* *************************************************************************/ -template +template CAMERA perturbCameraPose(const CAMERA& camera) { - Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10), - Point3(0.5, 0.1, 0.3)); + Pose3 noise_pose = + Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10), Point3(0.5, 0.1, 0.3)); Pose3 cameraPose = camera.pose(); Pose3 perturbedCameraPose = cameraPose.compose(noise_pose); return CAMERA(perturbedCameraPose, camera.calibration()); } -template -void projectToMultipleCameras(const CAMERA& cam1, const CAMERA& cam2, - const CAMERA& cam3, Point3 landmark, typename CAMERA::MeasurementVector& measurements_cam) { - Point2 cam1_uv1 = cam1.project(landmark); - Point2 cam2_uv1 = cam2.project(landmark); - Point2 cam3_uv1 = cam3.project(landmark); +template +void projectToMultipleCameras( + const CAMERA& cam1, const CAMERA& cam2, const CAMERA& cam3, Point3 landmark, + typename CAMERA::MeasurementVector& measurements_cam) { + typename CAMERA::Measurement cam1_uv1 = cam1.project(landmark); + typename CAMERA::Measurement cam2_uv1 = cam2.project(landmark); + typename CAMERA::Measurement cam3_uv1 = cam3.project(landmark); 
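+  // store the three projections in the same order as the input cameras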
measurements_cam.push_back(cam1_uv1); measurements_cam.push_back(cam2_uv1); measurements_cam.push_back(cam3_uv1); } /* ************************************************************************* */ - diff --git a/gtsam/slam/tests/testSlamExpressions.cpp b/gtsam/slam/tests/testSlamExpressions.cpp index 294b821d3..b5298989f 100644 --- a/gtsam/slam/tests/testSlamExpressions.cpp +++ b/gtsam/slam/tests/testSlamExpressions.cpp @@ -58,6 +58,13 @@ TEST(SlamExpressions, unrotate) { const Point3_ q_ = unrotate(R_, p_); } +/* ************************************************************************* */ +TEST(SlamExpressions, logmap) { + Pose3_ T1_(0); + Pose3_ T2_(1); + const Vector6_ l = logmap(T1_, T2_); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/slam/tests/testSmartFactorBase.cpp b/gtsam/slam/tests/testSmartFactorBase.cpp index 951cbf8f4..ba46dce8d 100644 --- a/gtsam/slam/tests/testSmartFactorBase.cpp +++ b/gtsam/slam/tests/testSmartFactorBase.cpp @@ -127,13 +127,13 @@ TEST(SmartFactorBase, Stereo) { } /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") TEST(SmartFactorBase, serialize) { using namespace gtsam::serializationTestHelpers; diff --git a/gtsam/slam/tests/testSmartProjectionFactor.cpp b/gtsam/slam/tests/testSmartProjectionFactor.cpp index 1fd06cc9f..133f81511 100644 --- a/gtsam/slam/tests/testSmartProjectionFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionFactor.cpp @@ -811,13 +811,13 @@ TEST(SmartProjectionFactor, implicitJacobianFactor ) { } /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") 
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") TEST(SmartProjectionFactor, serialize) { using namespace vanilla; diff --git a/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp b/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp index 997c33846..f3706fa02 100644 --- a/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionPoseFactor.cpp @@ -1333,13 +1333,13 @@ TEST( SmartProjectionPoseFactor, Cal3BundlerRotationOnly ) { } /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") TEST(SmartProjectionPoseFactor, serialize) { using namespace vanillaPose; diff --git a/gtsam/slam/tests/testSmartProjectionRigFactor.cpp b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp new file mode 100644 index 000000000..7f65a3c33 --- /dev/null +++ b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp @@ -0,0 +1,1603 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testSmartProjectionRigFactor.cpp + * @brief Unit tests for SmartProjectionRigFactor Class + * @author Chris Beall + * @author Luca Carlone + * @author Zsolt Kira + * @author Frank Dellaert + * @date August 2021 + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "smartFactorScenarios.h" +#define DISABLE_TIMING + +using namespace boost::assign; +using namespace std::placeholders; + +static const double rankTol = 1.0; +// Create a noise model for the pixel error +static const double sigma = 0.1; +static SharedIsotropic model(noiseModel::Isotropic::Sigma(2, sigma)); + +// Convenience for named keys +using symbol_shorthand::L; +using symbol_shorthand::X; + +// tests data +static Symbol x1('X', 1); +static Symbol x2('X', 2); +static Symbol x3('X', 3); + +Key cameraId1 = 0; // first camera +Key cameraId2 = 1; +Key cameraId3 = 2; + +static Point2 measurement1(323.0, 240.0); + +LevenbergMarquardtParams lmParams; + +/* ************************************************************************* */ +// default Cal3_S2 poses with rolling shutter effect +namespace vanillaRig { +using namespace vanillaPose; +SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors +} // namespace vanillaRig + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, Constructor) { + using namespace vanillaRig; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + SmartRigFactor::shared_ptr factor1( + new SmartRigFactor(model, cameraRig, params)); +} + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, Constructor2) { + using namespace vanillaRig; + boost::shared_ptr cameraRig(new Cameras()); + SmartProjectionParams params2( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params2.setRankTolerance(rankTol); + SmartRigFactor factor1(model, cameraRig, params2); +} + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, Constructor3) { + using namespace vanillaRig; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + SmartRigFactor::shared_ptr factor1( + new SmartRigFactor(model, cameraRig, params)); + factor1->add(measurement1, x1, cameraId1); +} + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, Constructor4) { + using namespace vanillaRig; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + SmartProjectionParams params2( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params2.setRankTolerance(rankTol); + SmartRigFactor factor1(model, cameraRig, params2); + factor1.add(measurement1, x1, cameraId1); +} + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, Equals) { + using namespace vanillaRig; + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartRigFactor::shared_ptr factor1( + new SmartRigFactor(model, 
cameraRig, params)); + factor1->add(measurement1, x1, cameraId1); + + SmartRigFactor::shared_ptr factor2( + new SmartRigFactor(model, cameraRig, params)); + factor2->add(measurement1, x1, cameraId1); + + CHECK(assert_equal(*factor1, *factor2)); + + SmartRigFactor::shared_ptr factor3( + new SmartRigFactor(model, cameraRig, params)); + factor3->add(measurement1, x1); // now use default camera ID + + CHECK(assert_equal(*factor1, *factor3)); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, noiseless) { + using namespace vanillaRig; + + // Project two landmarks into two cameras + Point2 level_uv = level_camera.project(landmark1); + Point2 level_uv_right = level_camera_right.project(landmark1); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartRigFactor factor(model, cameraRig, params); + factor.add(level_uv, x1); // both taken from the same camera + factor.add(level_uv_right, x2); + + Values values; // it's a pose factor, hence these are poses + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + + double actualError = factor.error(values); + double expectedError = 0.0; + EXPECT_DOUBLES_EQUAL(expectedError, actualError, 1e-7); + + SmartRigFactor::Cameras cameras = factor.cameras(values); + double actualError2 = factor.totalReprojectionError(cameras); + EXPECT_DOUBLES_EQUAL(expectedError, actualError2, 1e-7); + + // Calculate expected derivative for point (easiest to check) + std::function f = // + std::bind(&SmartRigFactor::whitenedError, factor, cameras, + std::placeholders::_1); + + // Calculate using computeEP + Matrix actualE; + factor.triangulateAndComputeE(actualE, values); + + // get point + boost::optional point = factor.point(); + CHECK(point); + + // calculate numerical derivative with triangulated point + Matrix expectedE = sigma * numericalDerivative11(f, *point); + EXPECT(assert_equal(expectedE, actualE, 1e-7)); + + // Calculate using reprojectionError + SmartRigFactor::Cameras::FBlocks F; + Matrix E; + Vector actualErrors = factor.unwhitenedError(cameras, *point, F, E); + EXPECT(assert_equal(expectedE, E, 1e-7)); + + EXPECT(assert_equal(Z_4x1, actualErrors, 1e-7)); + + // Calculate using computeJacobians + Vector b; + SmartRigFactor::FBlocks Fs; + factor.computeJacobians(Fs, E, b, cameras, *point); + double actualError3 = b.squaredNorm(); + EXPECT(assert_equal(expectedE, E, 1e-7)); + EXPECT_DOUBLES_EQUAL(expectedError, actualError3, 1e-6); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, noisy) { + using namespace vanillaRig; + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + // Project two landmarks into two cameras + Point2 pixelError(0.2, 0.2); + Point2 level_uv = level_camera.project(landmark1) + pixelError; + Point2 level_uv_right = level_camera_right.project(landmark1); + + Values values; + values.insert(x1, cam1.pose()); + Pose3 noise_pose = + Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10), Point3(0.5, 0.1, 0.3)); + values.insert(x2, pose_right.compose(noise_pose)); + + SmartRigFactor::shared_ptr factor( + new SmartRigFactor(model, cameraRig, params)); + factor->add(level_uv, x1, cameraId1); + factor->add(level_uv_right, x2, cameraId1); + + double actualError1 = factor->error(values); + + // create other factor by passing multiple measurements + 
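+  // (with a single camera in the rig the cameraIds argument could also be
+  // omitted; for rigs with more than one camera it must be provided and match
+  // the measurement order)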
SmartRigFactor::shared_ptr factor2( + new SmartRigFactor(model, cameraRig, params)); + + Point2Vector measurements; + measurements.push_back(level_uv); + measurements.push_back(level_uv_right); + + KeyVector views{x1, x2}; + FastVector cameraIds{0, 0}; + + factor2->add(measurements, views, cameraIds); + double actualError2 = factor2->error(values); + DOUBLES_EQUAL(actualError1, actualError2, 1e-7); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, smartFactorWithSensorBodyTransform) { + using namespace vanillaRig; + + // create arbitrary body_T_sensor (transforms from sensor to body) + Pose3 body_T_sensor = + Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), Point3(1, 1, 1)); + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(body_T_sensor, sharedK)); + + // These are the poses we want to estimate, from camera measurements + const Pose3 sensor_T_body = body_T_sensor.inverse(); + Pose3 wTb1 = cam1.pose() * sensor_T_body; + Pose3 wTb2 = cam2.pose() * sensor_T_body; + Pose3 wTb3 = cam3.pose() * sensor_T_body; + + // three landmarks ~5 meters infront of camera + Point3 landmark1(5, 0.5, 1.2), landmark2(5, -0.5, 1.2), landmark3(5, 0, 3.0); + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + // Create smart factors + KeyVector views{x1, x2, x3}; + FastVector cameraIds{0, 0, 0}; + + SmartProjectionParams params; + params.setRankTolerance(1.0); + params.setDegeneracyMode(ZERO_ON_DEGENERACY); + params.setEnableEPI(false); + + SmartRigFactor smartFactor1(model, cameraRig, params); + smartFactor1.add( + measurements_cam1, + views); // use default CameraIds since we have a single camera + + SmartRigFactor smartFactor2(model, cameraRig, params); + smartFactor2.add(measurements_cam2, views); + + SmartRigFactor smartFactor3(model, cameraRig, params); + smartFactor3.add(measurements_cam3, views); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + // Put all factors in factor graph, adding priors + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, wTb1, noisePrior); + graph.addPrior(x2, wTb2, noisePrior); + + // Check errors at ground truth poses + Values gtValues; + gtValues.insert(x1, wTb1); + gtValues.insert(x2, wTb2); + gtValues.insert(x3, wTb3); + double actualError = graph.error(gtValues); + double expectedError = 0.0; + DOUBLES_EQUAL(expectedError, actualError, 1e-7) + + Pose3 noise_pose = + Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), Point3(0.1, 0.1, 0.1)); + Values values; + values.insert(x1, wTb1); + values.insert(x2, wTb2); + // initialize third pose with some noise, we expect it to move back to + // original pose3 + values.insert(x3, wTb3 * noise_pose); + + LevenbergMarquardtParams lmParams; + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(wTb3, result.at(x3))); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, smartFactorWithMultipleCameras) { + using namespace vanillaRig; + + // create arbitrary 
body_T_sensor (transforms from sensor to body) + Pose3 body_T_sensor1 = + Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), Point3(1, 1, 1)); + Pose3 body_T_sensor2 = + Pose3(Rot3::Ypr(-M_PI / 5, 0., -M_PI / 2), Point3(0, 0, 1)); + Pose3 body_T_sensor3 = Pose3::identity(); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(body_T_sensor1, sharedK)); + cameraRig->push_back(Camera(body_T_sensor2, sharedK)); + cameraRig->push_back(Camera(body_T_sensor3, sharedK)); + + // These are the poses we want to estimate, from camera measurements + const Pose3 sensor_T_body1 = body_T_sensor1.inverse(); + const Pose3 sensor_T_body2 = body_T_sensor2.inverse(); + const Pose3 sensor_T_body3 = body_T_sensor3.inverse(); + Pose3 wTb1 = cam1.pose() * sensor_T_body1; + Pose3 wTb2 = cam2.pose() * sensor_T_body2; + Pose3 wTb3 = cam3.pose() * sensor_T_body3; + + // three landmarks ~5 meters infront of camera + Point3 landmark1(5, 0.5, 1.2), landmark2(5, -0.5, 1.2), landmark3(5, 0, 3.0); + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + // Create smart factors + KeyVector views{x1, x2, x3}; + FastVector cameraIds{0, 1, 2}; + + SmartProjectionParams params; + params.setRankTolerance(1.0); + params.setDegeneracyMode(ZERO_ON_DEGENERACY); + params.setEnableEPI(false); + + SmartRigFactor smartFactor1(model, cameraRig, params); + smartFactor1.add(measurements_cam1, views, cameraIds); + + SmartRigFactor smartFactor2(model, cameraRig, params); + smartFactor2.add(measurements_cam2, views, cameraIds); + + SmartRigFactor smartFactor3(model, cameraRig, params); + smartFactor3.add(measurements_cam3, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + // Put all factors in factor graph, adding priors + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, wTb1, noisePrior); + graph.addPrior(x2, wTb2, noisePrior); + + // Check errors at ground truth poses + Values gtValues; + gtValues.insert(x1, wTb1); + gtValues.insert(x2, wTb2); + gtValues.insert(x3, wTb3); + double actualError = graph.error(gtValues); + double expectedError = 0.0; + DOUBLES_EQUAL(expectedError, actualError, 1e-7) + + Pose3 noise_pose = + Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), Point3(0.1, 0.1, 0.1)); + Values values; + values.insert(x1, wTb1); + values.insert(x2, wTb2); + // initialize third pose with some noise, we expect it to move back to + // original pose3 + values.insert(x3, wTb3 * noise_pose); + + LevenbergMarquardtParams lmParams; + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(wTb3, result.at(x3))); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, 3poses_smart_projection_factor) { + using namespace vanillaPose2; + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK2)); + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, 
cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + KeyVector views{x1, x2, x3}; + FastVector cameraIds{ + 0, 0, 0}; // 3 measurements from the same camera in the rig + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_cam3, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, cam1.pose(), noisePrior); + graph.addPrior(x2, cam2.pose(), noisePrior); + + Values groundTruth; + groundTruth.insert(x1, cam1.pose()); + groundTruth.insert(x2, cam2.pose()); + groundTruth.insert(x3, cam3.pose()); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT(assert_equal( + Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, -0.0313952598, + -0.000986635786, 0.0314107591, -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-6)); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, Factors) { + using namespace vanillaRig; + + // Default cameras for simple derivatives + Rot3 R; + static Cal3_S2::shared_ptr sharedK(new Cal3_S2(100, 100, 0, 0, 0)); + Camera cam1(Pose3(R, Point3(0, 0, 0)), sharedK), + cam2(Pose3(R, Point3(1, 0, 0)), sharedK); + + // one landmarks 1m in front of camera + Point3 landmark1(0, 0, 10); + + Point2Vector measurements_cam1; + + // Project 2 landmarks into 2 cameras + measurements_cam1.push_back(cam1.project(landmark1)); + measurements_cam1.push_back(cam2.project(landmark1)); + + // Create smart factors + KeyVector views{x1, x2}; + FastVector cameraIds{0, 0}; + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartRigFactor::shared_ptr smartFactor1 = boost::make_shared( + model, cameraRig, params); + smartFactor1->add(measurements_cam1, + views); // we have a single camera so use default cameraIds + + SmartRigFactor::Cameras cameras; + cameras.push_back(cam1); + cameras.push_back(cam2); + + // Make sure triangulation works + CHECK(smartFactor1->triangulateSafe(cameras)); + CHECK(!smartFactor1->isDegenerate()); + 
CHECK(!smartFactor1->isPointBehindCamera()); + boost::optional p = smartFactor1->point(); + CHECK(p); + EXPECT(assert_equal(landmark1, *p)); + + VectorValues zeroDelta; + Vector6 delta; + delta.setZero(); + zeroDelta.insert(x1, delta); + zeroDelta.insert(x2, delta); + + VectorValues perturbedDelta; + delta.setOnes(); + perturbedDelta.insert(x1, delta); + perturbedDelta.insert(x2, delta); + double expectedError = 2500; + + // After eliminating the point, A1 and A2 contain 2-rank information on + // cameras: + Matrix16 A1, A2; + A1 << -10, 0, 0, 0, 1, 0; + A2 << 10, 0, 1, 0, -1, 0; + A1 *= 10. / sigma; + A2 *= 10. / sigma; + Matrix expectedInformation; // filled below + { + // createHessianFactor + Matrix66 G11 = 0.5 * A1.transpose() * A1; + Matrix66 G12 = 0.5 * A1.transpose() * A2; + Matrix66 G22 = 0.5 * A2.transpose() * A2; + + Vector6 g1; + g1.setZero(); + Vector6 g2; + g2.setZero(); + + double f = 0; + + RegularHessianFactor<6> expected(x1, x2, G11, G12, g1, G22, g2, f); + expectedInformation = expected.information(); + + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + + boost::shared_ptr> actual = + smartFactor1->createHessianFactor(values, 0.0); + EXPECT(assert_equal(expectedInformation, actual->information(), 1e-6)); + EXPECT(assert_equal(expected, *actual, 1e-6)); + EXPECT_DOUBLES_EQUAL(0, actual->error(zeroDelta), 1e-6); + EXPECT_DOUBLES_EQUAL(expectedError, actual->error(perturbedDelta), 1e-6); + } +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, 3poses_iterative_smart_projection_factor) { + using namespace vanillaRig; + + KeyVector views{x1, x2, x3}; + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + // create smart factor + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0}; + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_cam3, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, cam1.pose(), noisePrior); + graph.addPrior(x2, cam2.pose(), noisePrior); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT(assert_equal(Pose3(Rot3(1.11022302e-16, -0.0314107591, 0.99950656, + 
-0.99950656, -0.0313952598, -0.000986635786, + 0.0314107591, -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-7)); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, landmarkDistance) { + using namespace vanillaRig; + + double excludeLandmarksFutherThanDist = 2; + + KeyVector views{x1, x2, x3}; + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + SmartProjectionParams params; + params.setRankTolerance(1.0); + params.setLinearizationMode(gtsam::HESSIAN); + params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); + params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist); + params.setEnableEPI(false); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0}; + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_cam3, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, cam1.pose(), noisePrior); + graph.addPrior(x2, cam2.pose(), noisePrior); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + values.insert(x3, pose_above * noise_pose); + + // All factors are disabled and pose should remain where it is + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(values.at(x3), result.at(x3))); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, dynamicOutlierRejection) { + using namespace vanillaRig; + + double excludeLandmarksFutherThanDist = 1e10; + double dynamicOutlierRejectionThreshold = + 1; // max 1 pixel of average reprojection error + + KeyVector views{x1, x2, x3}; + + // add fourth landmark + Point3 landmark4(5, -0.5, 1); + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3, + measurements_cam4; + + // Project 4 landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + projectToMultipleCameras(cam1, cam2, cam3, 
landmark4, measurements_cam4); + measurements_cam4.at(0) = + measurements_cam4.at(0) + Point2(10, 10); // add outlier + + SmartProjectionParams params; + params.setLinearizationMode(gtsam::HESSIAN); + params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); + params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist); + params.setDynamicOutlierRejectionThreshold(dynamicOutlierRejectionThreshold); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0}; + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_cam3, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor4( + new SmartRigFactor(model, cameraRig, params)); + smartFactor4->add(measurements_cam4, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.push_back(smartFactor4); + graph.addPrior(x1, cam1.pose(), noisePrior); + graph.addPrior(x2, cam2.pose(), noisePrior); + + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + values.insert(x3, cam3.pose()); + + // All factors are disabled and pose should remain where it is + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(cam3.pose(), result.at(x3))); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, CheckHessian) { + KeyVector views{x1, x2, x3}; + + using namespace vanillaRig; + + // Two slightly different cameras + Pose3 pose2 = + level_pose * Pose3(Rot3::RzRyRx(-0.05, 0.0, -0.05), Point3(0, 0, 0)); + Pose3 pose3 = pose2 * Pose3(Rot3::RzRyRx(-0.05, 0.0, -0.05), Point3(0, 0, 0)); + Camera cam2(pose2, sharedK); + Camera cam3(pose3, sharedK); + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + SmartProjectionParams params; + params.setRankTolerance(10); + params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0}; + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); // HESSIAN, by default + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); // HESSIAN, by default + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); // HESSIAN, by default + smartFactor3->add(measurements_cam3, views, cameraIds); + + NonlinearFactorGraph graph; + 
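+  // the checks below verify that the sum of the per-factor information
+  // matrices (each with its landmark eliminated) matches the Hessian of the
+  // fully linearized graph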
graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose3 * noise_pose); + EXPECT(assert_equal(Pose3(Rot3(0.00563056869, -0.130848107, 0.991386438, + -0.991390265, -0.130426831, -0.0115837907, + 0.130819108, -0.98278564, -0.130455917), + Point3(0.0897734171, -0.110201006, 0.901022872)), + values.at(x3))); + + boost::shared_ptr factor1 = smartFactor1->linearize(values); + boost::shared_ptr factor2 = smartFactor2->linearize(values); + boost::shared_ptr factor3 = smartFactor3->linearize(values); + + Matrix CumulativeInformation = + factor1->information() + factor2->information() + factor3->information(); + + boost::shared_ptr GaussianGraph = + graph.linearize(values); + Matrix GraphInformation = GaussianGraph->hessian().first; + + // Check Hessian + EXPECT(assert_equal(GraphInformation, CumulativeInformation, 1e-6)); + + Matrix AugInformationMatrix = factor1->augmentedInformation() + + factor2->augmentedInformation() + + factor3->augmentedInformation(); + + // Check Information vector + Vector InfoVector = AugInformationMatrix.block( + 0, 18, 18, 1); // 18x18 Hessian + information vector + + // Check Hessian + EXPECT(assert_equal(InfoVector, GaussianGraph->hessian().second, 1e-6)); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, Hessian) { + using namespace vanillaPose2; + + KeyVector views{x1, x2}; + + // Project three landmarks into 2 cameras + Point2 cam1_uv1 = cam1.project(landmark1); + Point2 cam2_uv1 = cam2.project(landmark1); + Point2Vector measurements_cam1; + measurements_cam1.push_back(cam1_uv1); + measurements_cam1.push_back(cam2_uv1); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK2)); + FastVector cameraIds{0, 0}; + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + Pose3 noise_pose = + Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 10), Point3(0.5, 0.1, 0.3)); + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + + boost::shared_ptr factor = smartFactor1->linearize(values); + + // compute triangulation from linearization point + // compute reprojection errors (sum squared) + // compare with factor.info(): the bottom right element is the squared sum of + // the reprojection errors (normalized by the covariance) check that it is + // correctly scaled when using noiseProjection = [1/4 0; 0 1/4] +} + +/* ************************************************************************* */ +TEST(SmartProjectionRigFactor, ConstructorWithCal3Bundler) { + using namespace bundlerPose; + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedBundlerK)); + + SmartProjectionParams params; + 
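+  // rig factors currently require HESSIAN linearization (the default) together
+  // with ZERO_ON_DEGENERACY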
params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); + SmartRigFactor factor(model, cameraRig, params); + factor.add(measurement1, x1, cameraId1); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, Cal3Bundler) { + using namespace bundlerPose; + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + + // three landmarks ~5 meters in front of camera + Point3 landmark3(3, 0, 3.0); + + Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3); + + KeyVector views{x1, x2, x3}; + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedBundlerK)); + FastVector cameraIds{0, 0, 0}; + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_cam1, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_cam2, views, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_cam3, views, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, cam1.pose(), noisePrior); + graph.addPrior(x2, cam2.pose(), noisePrior); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, cam1.pose()); + values.insert(x2, cam2.pose()); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT(assert_equal( + Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, -0.0313952598, + -0.000986635786, 0.0314107591, -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(cam3.pose(), result.at(x3), 1e-6)); +} + +#include +typedef GenericProjectionFactor TestProjectionFactor; +static Symbol l0('L', 0); +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, + hessianComparedToProjFactors_measurementsFromSamePose) { + // in this test we make sure the fact works even if we have multiple pixel + // measurements of the same landmark at a single pose, a setup that occurs in + // multi-camera systems + + using namespace vanillaRig; + Point2Vector measurements_lmk1; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_lmk1); + + // create redundant measurements: + Camera::MeasurementVector measurements_lmk1_redundant = measurements_lmk1; + measurements_lmk1_redundant.push_back( + measurements_lmk1.at(0)); // we readd the first 
measurement + + // create inputs + KeyVector keys{x1, x2, x3, x1}; + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0, 0}; + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1_redundant, keys, cameraIds); + + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise to get a nontrivial linearization + // point + values.insert(x3, pose_above * noise_pose); + EXPECT( // check that the pose is actually noisy + assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, + -0.0313952598, -0.000986635786, 0.0314107591, + -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + // linearization point for the poses + Pose3 pose1 = level_pose; + Pose3 pose2 = pose_right; + Pose3 pose3 = pose_above * noise_pose; + + // ==== check Hessian of smartFactor1 ===== + // -- compute actual Hessian + boost::shared_ptr linearfactor1 = + smartFactor1->linearize(values); + Matrix actualHessian = linearfactor1->information(); + + // -- compute expected Hessian from manual Schur complement from Jacobians + // linearization point for the 3D point + smartFactor1->triangulateSafe(smartFactor1->cameras(values)); + TriangulationResult point = smartFactor1->point(); + EXPECT(point.valid()); // check triangulated point is valid + + // Use standard ProjectionFactor factor to calculate the Jacobians + Matrix F = Matrix::Zero(2 * 4, 6 * 3); + Matrix E = Matrix::Zero(2 * 4, 3); + Vector b = Vector::Zero(2 * 4); + + // create projection factors rolling shutter + TestProjectionFactor factor11(measurements_lmk1_redundant[0], model, x1, l0, + sharedK); + Matrix HPoseActual, HEActual; + // note: b is minus the reprojection error, cf the smart factor jacobian + // computation + b.segment<2>(0) = + -factor11.evaluateError(pose1, *point, HPoseActual, HEActual); + F.block<2, 6>(0, 0) = HPoseActual; + E.block<2, 3>(0, 0) = HEActual; + + TestProjectionFactor factor12(measurements_lmk1_redundant[1], model, x2, l0, + sharedK); + b.segment<2>(2) = + -factor12.evaluateError(pose2, *point, HPoseActual, HEActual); + F.block<2, 6>(2, 6) = HPoseActual; + E.block<2, 3>(2, 0) = HEActual; + + TestProjectionFactor factor13(measurements_lmk1_redundant[2], model, x3, l0, + sharedK); + b.segment<2>(4) = + -factor13.evaluateError(pose3, *point, HPoseActual, HEActual); + F.block<2, 6>(4, 12) = HPoseActual; + E.block<2, 3>(4, 0) = HEActual; + + TestProjectionFactor factor14(measurements_lmk1_redundant[3], model, x1, l0, + sharedK); + b.segment<2>(6) = + -factor11.evaluateError(pose1, *point, HPoseActual, HEActual); + F.block<2, 6>(6, 0) = HPoseActual; + E.block<2, 3>(6, 0) = HEActual; + + // whiten + F = (1 / sigma) * F; + E = (1 / sigma) * E; + b = (1 / sigma) * b; + //* G = F' * F - F' * E * P * E' * F + Matrix P = (E.transpose() * E).inverse(); + Matrix expectedHessian = + F.transpose() * F - (F.transpose() * E * P * E.transpose() * F); + EXPECT(assert_equal(expectedHessian, actualHessian, 1e-6)); + + // ==== check Information vector of smartFactor1 ===== + GaussianFactorGraph gfg; + gfg.add(linearfactor1); + Matrix actualHessian_v2 = gfg.hessian().first; + EXPECT(assert_equal(actualHessian_v2, actualHessian, + 1e-6)); // sanity check on hessian + + // -- 
compute actual information vector + Vector actualInfoVector = gfg.hessian().second; + + // -- compute expected information vector from manual Schur complement from + // Jacobians + //* g = F' * (b - E * P * E' * b) + Vector expectedInfoVector = F.transpose() * (b - E * P * E.transpose() * b); + EXPECT(assert_equal(expectedInfoVector, actualInfoVector, 1e-6)); + + // ==== check error of smartFactor1 (again) ===== + NonlinearFactorGraph nfg_projFactors; + nfg_projFactors.add(factor11); + nfg_projFactors.add(factor12); + nfg_projFactors.add(factor13); + nfg_projFactors.add(factor14); + values.insert(l0, *point); + + double actualError = smartFactor1->error(values); + double expectedError = nfg_projFactors.error(values); + EXPECT_DOUBLES_EQUAL(expectedError, actualError, 1e-7); +} + +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, optimization_3poses_measurementsFromSamePose) { + using namespace vanillaRig; + Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_lmk1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_lmk2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_lmk3); + + // create inputs + KeyVector keys{x1, x2, x3}; + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + FastVector cameraIds{0, 0, 0}; + FastVector cameraIdsRedundant{0, 0, 0, 0}; + + // For first factor, we create redundant measurement (taken by the same keys + // as factor 1, to make sure the redundancy in the keys does not create + // problems) + Camera::MeasurementVector& measurements_lmk1_redundant = measurements_lmk1; + measurements_lmk1_redundant.push_back( + measurements_lmk1.at(0)); // we readd the first measurement + KeyVector keys_redundant = keys; + keys_redundant.push_back(keys.at(0)); // we readd the first key + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1_redundant, keys_redundant, + cameraIdsRedundant); + + SmartRigFactor::shared_ptr smartFactor2( + new SmartRigFactor(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, keys, cameraIds); + + SmartRigFactor::shared_ptr smartFactor3( + new SmartRigFactor(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, keys, cameraIds); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, level_pose, noisePrior); + graph.addPrior(x2, pose_right, noisePrior); + + Values groundTruth; + groundTruth.insert(x1, level_pose); + groundTruth.insert(x2, pose_right); + groundTruth.insert(x3, pose_above); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT( // check that the pose is actually noisy + 
assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, + -0.0313952598, -0.000986635786, 0.0314107591, + -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-5)); +} + +#ifndef DISABLE_TIMING +#include +// this factor is slightly slower (but comparable) to original +// SmartProjectionPoseFactor +//-Total: 0 CPU (0 times, 0 wall, 0.17 children, min: 0 max: 0) +//| -SmartRigFactor LINEARIZE: 0.05 CPU (10000 times, 0.057952 wall, 0.05 +// children, min: 0 max: 0) | -SmartPoseFactor LINEARIZE: 0.05 CPU (10000 +// times, 0.069647 wall, 0.05 children, min: 0 max: 0) +/* *************************************************************************/ +TEST(SmartProjectionRigFactor, timing) { + using namespace vanillaRig; + + // Default cameras for simple derivatives + static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); + + Rot3 R = Rot3::identity(); + Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); + Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); + Camera cam1(pose1, sharedKSimple), cam2(pose2, sharedKSimple); + Pose3 body_P_sensorId = Pose3::identity(); + + boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig + cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple)); + + // one landmarks 1m in front of camera + Point3 landmark1(0, 0, 10); + + Point2Vector measurements_lmk1; + + // Project 2 landmarks into 2 cameras + measurements_lmk1.push_back(cam1.project(landmark1)); + measurements_lmk1.push_back(cam2.project(landmark1)); + + size_t nrTests = 10000; + + for (size_t i = 0; i < nrTests; i++) { + SmartRigFactor::shared_ptr smartRigFactor( + new SmartRigFactor(model, cameraRig, params)); + smartRigFactor->add(measurements_lmk1[0], x1, cameraId1); + smartRigFactor->add(measurements_lmk1[1], x1, cameraId1); + + Values values; + values.insert(x1, pose1); + values.insert(x2, pose2); + gttic_(SmartRigFactor_LINEARIZE); + smartRigFactor->linearize(values); + gttoc_(SmartRigFactor_LINEARIZE); + } + + for (size_t i = 0; i < nrTests; i++) { + SmartFactor::shared_ptr smartFactor( + new SmartFactor(model, sharedKSimple, params)); + smartFactor->add(measurements_lmk1[0], x1); + smartFactor->add(measurements_lmk1[1], x2); + + Values values; + values.insert(x1, pose1); + values.insert(x2, pose2); + gttic_(SmartPoseFactor_LINEARIZE); + smartFactor->linearize(values); + gttoc_(SmartPoseFactor_LINEARIZE); + } + tictoc_print_(); +} +#endif + +/* *************************************************************************/ +TEST(SmartProjectionFactorP, optimization_3poses_sphericalCamera) { + using namespace sphericalCamera; + Camera::MeasurementVector measurements_lmk1, measurements_lmk2, + measurements_lmk3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, + measurements_lmk1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, + measurements_lmk2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, + measurements_lmk3); + + // create inputs + KeyVector keys; + keys.push_back(x1); + keys.push_back(x2); + keys.push_back(x3); + + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), emptyK)); + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(0.1); + + SmartFactorP::shared_ptr 
smartFactor1( + new SmartFactorP(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1, keys); + + SmartFactorP::shared_ptr smartFactor2( + new SmartFactorP(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, keys); + + SmartFactorP::shared_ptr smartFactor3( + new SmartFactorP(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, keys); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, level_pose, noisePrior); + graph.addPrior(x2, pose_right, noisePrior); + + Values groundTruth; + groundTruth.insert(x1, level_pose); + groundTruth.insert(x2, pose_right); + groundTruth.insert(x3, pose_above); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 10, 0., -M_PI / 100), + Point3(0.2, 0.2, 0.2)); // note: larger noise! + + Values values; + values.insert(x1, level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + + DOUBLES_EQUAL(0.94148963675515274, graph.error(values), 1e-9); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + + EXPECT(assert_equal(pose_above, result.at(x3), 1e-5)); +} + +#ifndef DISABLE_TIMING +#include +// using spherical camera is slightly slower (but comparable) to +// PinholePose +//| -SmartFactorP spherical LINEARIZE: 0.01 CPU (1000 times, 0.008178 wall, +// 0.01 children, min: 0 max: 0) | -SmartFactorP pinhole LINEARIZE: 0.01 CPU +//(1000 times, 0.005717 wall, 0.01 children, min: 0 max: 0) +/* *************************************************************************/ +TEST(SmartProjectionFactorP, timing_sphericalCamera) { + // create common data + Rot3 R = Rot3::identity(); + Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); + Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); + Pose3 body_P_sensorId = Pose3::identity(); + Point3 landmark1(0, 0, 10); + + // create spherical data + EmptyCal::shared_ptr emptyK; + SphericalCamera cam1_sphere(pose1, emptyK), cam2_sphere(pose2, emptyK); + // Project 2 landmarks into 2 cameras + std::vector measurements_lmk1_sphere; + measurements_lmk1_sphere.push_back(cam1_sphere.project(landmark1)); + measurements_lmk1_sphere.push_back(cam2_sphere.project(landmark1)); + + // create Cal3_S2 data + static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); + PinholePose cam1(pose1, sharedKSimple), cam2(pose2, sharedKSimple); + // Project 2 landmarks into 2 cameras + std::vector measurements_lmk1; + measurements_lmk1.push_back(cam1.project(landmark1)); + measurements_lmk1.push_back(cam2.project(landmark1)); + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + + size_t nrTests = 1000; + + for (size_t i = 0; i < nrTests; i++) { + boost::shared_ptr> cameraRig( + new CameraSet()); // single camera in the rig + cameraRig->push_back(SphericalCamera(body_P_sensorId, emptyK)); + + SmartProjectionRigFactor::shared_ptr smartFactorP( + new SmartProjectionRigFactor(model, cameraRig, + params)); + smartFactorP->add(measurements_lmk1_sphere[0], x1); + smartFactorP->add(measurements_lmk1_sphere[1], x1); + + Values values; + values.insert(x1, pose1); + values.insert(x2, pose2); + gttic_(SmartFactorP_spherical_LINEARIZE); + 
smartFactorP->linearize(values); + gttoc_(SmartFactorP_spherical_LINEARIZE); + } + + for (size_t i = 0; i < nrTests; i++) { + boost::shared_ptr>> cameraRig( + new CameraSet>()); // single camera in the rig + cameraRig->push_back(PinholePose(body_P_sensorId, sharedKSimple)); + + SmartProjectionRigFactor>::shared_ptr smartFactorP2( + new SmartProjectionRigFactor>(model, cameraRig, + params)); + smartFactorP2->add(measurements_lmk1[0], x1); + smartFactorP2->add(measurements_lmk1[1], x1); + + Values values; + values.insert(x1, pose1); + values.insert(x2, pose2); + gttic_(SmartFactorP_pinhole_LINEARIZE); + smartFactorP2->linearize(values); + gttoc_(SmartFactorP_pinhole_LINEARIZE); + } + tictoc_print_(); +} +#endif + +/* *************************************************************************/ +TEST(SmartProjectionFactorP, 2poses_rankTol) { + Pose3 poseA = Pose3( + Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame + Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, -0.1, 0.0)); // 10cm to the right of poseA + Point3 landmarkL = Point3(5.0, 0.0, 0.0); // 5m in front of poseA + + // triangulate from a stereo with 10cm baseline, assuming standard calibration + { // default rankTol = 1 gives a valid point (compare with calibrated and + // spherical cameras below) + using namespace vanillaPose; // pinhole with Cal3_S2 calibration + + Camera cam1(poseA, sharedK); + Camera cam2(poseB, sharedK); + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(1); + + boost::shared_ptr>> cameraRig( + new CameraSet>()); // single camera in the rig + cameraRig->push_back(PinholePose(Pose3::identity(), sharedK)); + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(cam1.project(landmarkL), x1); + smartFactor1->add(cam2.project(landmarkL), x2); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + + Values groundTruth; + groundTruth.insert(x1, poseA); + groundTruth.insert(x2, poseB); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // get point + TriangulationResult point = smartFactor1->point(); + EXPECT(point.valid()); // valid triangulation + EXPECT(assert_equal(landmarkL, *point, 1e-7)); + } + // triangulate from a stereo with 10cm baseline, assuming canonical + // calibration + { // default rankTol = 1 or 0.1 gives a degenerate point, which is + // undesirable for a point 5m away and 10cm baseline + using namespace vanillaPose; // pinhole with Cal3_S2 calibration + static Cal3_S2::shared_ptr canonicalK( + new Cal3_S2(1.0, 1.0, 0.0, 0.0, 0.0)); // canonical camera + + Camera cam1(poseA, canonicalK); + Camera cam2(poseB, canonicalK); + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(0.1); + + boost::shared_ptr>> cameraRig( + new CameraSet>()); // single camera in the rig + cameraRig->push_back(PinholePose(Pose3::identity(), canonicalK)); + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(cam1.project(landmarkL), x1); + smartFactor1->add(cam2.project(landmarkL), x2); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + + Values groundTruth; + groundTruth.insert(x1, poseA); + groundTruth.insert(x2, poseB); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // get point + 
TriangulationResult point = smartFactor1->point(); + EXPECT(point.degenerate()); // valid triangulation + } + // triangulate from a stereo with 10cm baseline, assuming canonical + // calibration + { // smaller rankTol = 0.01 gives a valid point (compare with calibrated and + // spherical cameras below) + using namespace vanillaPose; // pinhole with Cal3_S2 calibration + static Cal3_S2::shared_ptr canonicalK( + new Cal3_S2(1.0, 1.0, 0.0, 0.0, 0.0)); // canonical camera + + Camera cam1(poseA, canonicalK); + Camera cam2(poseB, canonicalK); + + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(0.01); + + boost::shared_ptr>> cameraRig( + new CameraSet>()); // single camera in the rig + cameraRig->push_back(PinholePose(Pose3::identity(), canonicalK)); + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(cam1.project(landmarkL), x1); + smartFactor1->add(cam2.project(landmarkL), x2); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + + Values groundTruth; + groundTruth.insert(x1, poseA); + groundTruth.insert(x2, poseB); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // get point + TriangulationResult point = smartFactor1->point(); + EXPECT(point.valid()); // valid triangulation + EXPECT(assert_equal(landmarkL, *point, 1e-7)); + } +} + +/* *************************************************************************/ +TEST(SmartProjectionFactorP, 2poses_sphericalCamera_rankTol) { + typedef SphericalCamera Camera; + typedef SmartProjectionRigFactor SmartRigFactor; + static EmptyCal::shared_ptr emptyK; + Pose3 poseA = Pose3( + Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame + Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), + Point3(0.0, -0.1, 0.0)); // 10cm to the right of poseA + Point3 landmarkL = Point3(5.0, 0.0, 0.0); // 5m in front of poseA + + Camera cam1(poseA); + Camera cam2(poseB); + + boost::shared_ptr> cameraRig( + new CameraSet()); // single camera in the rig + cameraRig->push_back(SphericalCamera(Pose3::identity(), emptyK)); + + // TRIANGULATION TEST WITH DEFAULT RANK TOL + { // rankTol = 1 or 0.1 gives a degenerate point, which is undesirable for a + // point 5m away and 10cm baseline + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(0.1); + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(cam1.project(landmarkL), x1); + smartFactor1->add(cam2.project(landmarkL), x2); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + + Values groundTruth; + groundTruth.insert(x1, poseA); + groundTruth.insert(x2, poseB); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // get point + TriangulationResult point = smartFactor1->point(); + EXPECT(point.degenerate()); // not enough parallax + } + // SAME TEST WITH SMALLER RANK TOL + { // rankTol = 0.01 gives a valid point + // By playing with this test, we can show we can triangulate also with a + // baseline of 5cm (even for points far away, >100m), but the test fails + // when the baseline becomes 1cm. This suggests using rankTol = 0.01 and + // setting a reasonable max landmark distance to obtain best results. 
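    // [Editor's note -- illustrative sketch, not part of this patch] Following
    // the suggestion above, a conservative configuration would combine the
    // tighter rank tolerance with a cap on the admissible landmark distance,
    // e.g. (assuming SmartProjectionParams also exposes the usual
    // setLandmarkDistanceThreshold setter, which this test does not exercise):
    //   SmartProjectionParams conservative(gtsam::HESSIAN,
    //                                      gtsam::ZERO_ON_DEGENERACY);
    //   conservative.setRankTolerance(0.01);
    //   conservative.setLandmarkDistanceThreshold(100.0);  // drop points >100m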
+ SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors + params.setRankTolerance(0.01); + + SmartRigFactor::shared_ptr smartFactor1( + new SmartRigFactor(model, cameraRig, params)); + smartFactor1->add(cam1.project(landmarkL), x1); + smartFactor1->add(cam2.project(landmarkL), x2); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + + Values groundTruth; + groundTruth.insert(x1, poseA); + groundTruth.insert(x2, poseB); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // get point + TriangulationResult point = smartFactor1->point(); + EXPECT(point.valid()); // valid triangulation + EXPECT(assert_equal(landmarkL, *point, 1e-7)); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index 13c061b9b..98a1b4ef9 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -100,12 +100,12 @@ endif() install( TARGETS gtsam_unstable - EXPORT GTSAM-exports + EXPORT GTSAM_UNSTABLE-exports LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) -list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable) -set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) +list(APPEND GTSAM_UNSTABLE_EXPORTED_TARGETS gtsam_unstable) +set(GTSAM_UNSTABLE_EXPORTED_TARGETS "${GTSAM_UNSTABLE_EXPORTED_TARGETS}" PARENT_SCOPE) # Build examples add_subdirectory(examples) diff --git a/gtsam_unstable/discrete/AllDiff.cpp b/gtsam_unstable/discrete/AllDiff.cpp index 9e124954f..85cf0b472 100644 --- a/gtsam_unstable/discrete/AllDiff.cpp +++ b/gtsam_unstable/discrete/AllDiff.cpp @@ -5,107 +5,109 @@ * @author Frank Dellaert */ -#include -#include #include +#include +#include + #include namespace gtsam { - /* ************************************************************************* */ - AllDiff::AllDiff(const DiscreteKeys& dkeys) : - Constraint(dkeys.indices()) { - for(const DiscreteKey& dkey: dkeys) - cardinalities_.insert(dkey); - } +/* ************************************************************************* */ +AllDiff::AllDiff(const DiscreteKeys& dkeys) : Constraint(dkeys.indices()) { + for (const DiscreteKey& dkey : dkeys) cardinalities_.insert(dkey); +} - /* ************************************************************************* */ - void AllDiff::print(const std::string& s, - const KeyFormatter& formatter) const { - std::cout << s << "AllDiff on "; - for (Key dkey: keys_) - std::cout << formatter(dkey) << " "; - std::cout << std::endl; - } +/* ************************************************************************* */ +void AllDiff::print(const std::string& s, const KeyFormatter& formatter) const { + std::cout << s << "AllDiff on "; + for (Key dkey : keys_) std::cout << formatter(dkey) << " "; + std::cout << std::endl; +} - /* ************************************************************************* */ - double AllDiff::operator()(const Values& values) const { - std::set < size_t > taken; // record values taken by keys - for(Key dkey: keys_) { - size_t value = values.at(dkey); // get the value for that key - if (taken.count(value)) return 0.0;// check if value alreday taken - taken.insert(value);// if not, record it as taken and keep checking +/* 
************************************************************************* */
+double AllDiff::operator()(const Values& values) const {
+  std::set<size_t> taken;  // record values taken by keys
+  for (Key dkey : keys_) {
+    size_t value = values.at(dkey);      // get the value for that key
+    if (taken.count(value)) return 0.0;  // check if value already taken
+    taken.insert(value);  // if not, record it as taken and keep checking
+  }
+  return 1.0;
+}
+
+/* ************************************************************************* */
+DecisionTreeFactor AllDiff::toDecisionTreeFactor() const {
+  // We will do this by converting the AllDiff into many BinaryAllDiff
+  // constraints
+  DecisionTreeFactor converted;
+  size_t nrKeys = keys_.size();
+  for (size_t i1 = 0; i1 < nrKeys; i1++)
+    for (size_t i2 = i1 + 1; i2 < nrKeys; i2++) {
+      BinaryAllDiff binary12(discreteKey(i1), discreteKey(i2));
+      converted = converted * binary12.toDecisionTreeFactor();
     }
-    return 1.0;
+  return converted;
+}
+
+/* ************************************************************************* */
+DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const {
+  // TODO: can we do this more efficiently?
+  return toDecisionTreeFactor() * f;
+}
+
+/* ************************************************************************* */
+bool AllDiff::ensureArcConsistency(Key j, Domains* domains) const {
+  Domain& Dj = domains->at(j);
+
+  // Though strictly not part of allDiff, we check for
+  // a value in domains->at(j) that does not occur in any other connected domain.
+  // If found, we make this a singleton...
+  // TODO: make a new constraint where this really is true
+  boost::optional<Domain> maybeChanged = Dj.checkAllDiff(keys_, *domains);
+  if (maybeChanged) {
+    Dj = *maybeChanged;
+    return true;
   }
-  /* ************************************************************************* */
-  DecisionTreeFactor AllDiff::toDecisionTreeFactor() const {
-    // We will do this by converting the allDif into many BinaryAllDiff constraints
-    DecisionTreeFactor converted;
-    size_t nrKeys = keys_.size();
-    for (size_t i1 = 0; i1 < nrKeys; i1++)
-      for (size_t i2 = i1 + 1; i2 < nrKeys; i2++) {
-        BinaryAllDiff binary12(discreteKey(i1),discreteKey(i2));
-        converted = converted * binary12.toDecisionTreeFactor();
-      }
-    return converted;
-  }
-
-  /* ************************************************************************* */
-  DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const {
-    // TODO: can we do this more efficiently?
-    return toDecisionTreeFactor() * f;
-  }
-
-  /* ************************************************************************* */
-  bool AllDiff::ensureArcConsistency(size_t j, std::vector<Domain>& domains) const {
-    // Though strictly not part of allDiff, we check for
-    // a value in domains[j] that does not occur in any other connected domain.
-    // If found, we make this a singleton...
-    // TODO: make a new constraint where this really is true
-    Domain& Dj = domains[j];
-    if (Dj.checkAllDiff(keys_, domains)) return true;
-
-    // Check all other domains for singletons and erase corresponding values
-    // This is the same as arc-consistency on the equivalent binary constraints
-    bool changed = false;
-    for(Key k: keys_)
-      if (k != j) {
-        const Domain& Dk = domains[k];
-        if (Dk.isSingleton()) { // check if singleton
-          size_t value = Dk.firstValue();
-          if (Dj.contains(value)) {
-            Dj.erase(value); // erase value if true
-            changed = true;
-          }
+  // Check all other domains for singletons and erase corresponding values.
+ // This is the same as arc-consistency on the equivalent binary constraints + bool changed = false; + for (Key k : keys_) + if (k != j) { + const Domain& Dk = domains->at(k); + if (Dk.isSingleton()) { // check if singleton + size_t value = Dk.firstValue(); + if (Dj.contains(value)) { + Dj.erase(value); // erase value if true + changed = true; } } - return changed; - } + } + return changed; +} - /* ************************************************************************* */ - Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const { - DiscreteKeys newKeys; - // loop over keys and add them only if they do not appear in values - for(Key k: keys_) - if (values.find(k) == values.end()) { - newKeys.push_back(DiscreteKey(k,cardinalities_.at(k))); - } - return boost::make_shared(newKeys); - } +/* ************************************************************************* */ +Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const { + DiscreteKeys newKeys; + // loop over keys and add them only if they do not appear in values + for (Key k : keys_) + if (values.find(k) == values.end()) { + newKeys.push_back(DiscreteKey(k, cardinalities_.at(k))); + } + return boost::make_shared(newKeys); +} - /* ************************************************************************* */ - Constraint::shared_ptr AllDiff::partiallyApply( - const std::vector& domains) const { - DiscreteFactor::Values known; - for(Key k: keys_) { - const Domain& Dk = domains[k]; - if (Dk.isSingleton()) - known[k] = Dk.firstValue(); - } - return partiallyApply(known); +/* ************************************************************************* */ +Constraint::shared_ptr AllDiff::partiallyApply( + const Domains& domains) const { + DiscreteFactor::Values known; + for (Key k : keys_) { + const Domain& Dk = domains.at(k); + if (Dk.isSingleton()) known[k] = Dk.firstValue(); } + return partiallyApply(known); +} - /* ************************************************************************* */ -} // namespace gtsam +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam_unstable/discrete/AllDiff.h b/gtsam_unstable/discrete/AllDiff.h index 80e700b29..57b0aeb5c 100644 --- a/gtsam_unstable/discrete/AllDiff.h +++ b/gtsam_unstable/discrete/AllDiff.h @@ -7,71 +7,66 @@ #pragma once -#include #include +#include namespace gtsam { - /** - * General AllDiff constraint - * Returns 1 if values for all keys are different, 0 otherwise - * DiscreteFactors are all awkward in that they have to store two types of keys: - * for each variable we have a Key and an Key. In this factor, we - * keep the Indices locally, and the Indices are stored in IndexFactor. +/** + * General AllDiff constraint. + * Returns 1 if values for all keys are different, 0 otherwise. + */ +class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint { + std::map cardinalities_; + + DiscreteKey discreteKey(size_t i) const { + Key j = keys_[i]; + return DiscreteKey(j, cardinalities_.at(j)); + } + + public: + /// Construct from keys. 
+ AllDiff(const DiscreteKeys& dkeys); + + // print + void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const override; + + /// equals + bool equals(const DiscreteFactor& other, double tol) const override { + if (!dynamic_cast(&other)) + return false; + else { + const AllDiff& f(static_cast(other)); + return cardinalities_.size() == f.cardinalities_.size() && + std::equal(cardinalities_.begin(), cardinalities_.end(), + f.cardinalities_.begin()); + } + } + + /// Calculate value = expensive ! + double operator()(const Values& values) const override; + + /// Convert into a decisiontree, can be *very* expensive ! + DecisionTreeFactor toDecisionTreeFactor() const override; + + /// Multiply into a decisiontree + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; + + /* + * Ensure Arc-consistency by checking every possible value of domain j. + * @param j domain to be checked + * @param (in/out) domains all domains, but only domains->at(j) will be checked. + * @return true if domains->at(j) was changed, false otherwise. */ - class GTSAM_UNSTABLE_EXPORT AllDiff: public Constraint { + bool ensureArcConsistency(Key j, Domains* domains) const override; - std::map cardinalities_; + /// Partially apply known values + Constraint::shared_ptr partiallyApply(const Values&) const override; - DiscreteKey discreteKey(size_t i) const { - Key j = keys_[i]; - return DiscreteKey(j,cardinalities_.at(j)); - } + /// Partially apply known values, domain version + Constraint::shared_ptr partiallyApply( + const Domains&) const override; +}; - public: - - /// Constructor - AllDiff(const DiscreteKeys& dkeys); - - // print - void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const override; - - /// equals - bool equals(const DiscreteFactor& other, double tol) const override { - if(!dynamic_cast(&other)) - return false; - else { - const AllDiff& f(static_cast(other)); - return cardinalities_.size() == f.cardinalities_.size() - && std::equal(cardinalities_.begin(), cardinalities_.end(), - f.cardinalities_.begin()); - } - } - - /// Calculate value = expensive ! - double operator()(const Values& values) const override; - - /// Convert into a decisiontree, can be *very* expensive ! - DecisionTreeFactor toDecisionTreeFactor() const override; - - /// Multiply into a decisiontree - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; - - /* - * Ensure Arc-consistency - * Arc-consistency involves creating binaryAllDiff constraints - * In which case the combinatorial hyper-arc explosion disappears. 
- * @param j domain to be checked - * @param domains all other domains - */ - bool ensureArcConsistency(size_t j, std::vector& domains) const override; - - /// Partially apply known values - Constraint::shared_ptr partiallyApply(const Values&) const override; - - /// Partially apply known values, domain version - Constraint::shared_ptr partiallyApply(const std::vector&) const override; - }; - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam_unstable/discrete/BinaryAllDiff.h b/gtsam_unstable/discrete/BinaryAllDiff.h index bbb60e2f1..a2c7ba660 100644 --- a/gtsam_unstable/discrete/BinaryAllDiff.h +++ b/gtsam_unstable/discrete/BinaryAllDiff.h @@ -7,94 +7,90 @@ #pragma once -#include -#include #include +#include +#include namespace gtsam { - /** - * Binary AllDiff constraint - * Returns 1 if values for two keys are different, 0 otherwise - * DiscreteFactors are all awkward in that they have to store two types of keys: - * for each variable we have a Index and an Index. In this factor, we - * keep the Indices locally, and the Indices are stored in IndexFactor. - */ - class BinaryAllDiff: public Constraint { +/** + * Binary AllDiff constraint + * Returns 1 if values for two keys are different, 0 otherwise. + */ +class BinaryAllDiff : public Constraint { + size_t cardinality0_, cardinality1_; /// cardinality - size_t cardinality0_, cardinality1_; /// cardinality + public: + /// Constructor + BinaryAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) + : Constraint(key1.first, key2.first), + cardinality0_(key1.second), + cardinality1_(key2.second) {} - public: + // print + void print( + const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override { + std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and " + << formatter(keys_[1]) << std::endl; + } - /// Constructor - BinaryAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) : - Constraint(key1.first, key2.first), - cardinality0_(key1.second), cardinality1_(key2.second) { - } - - // print - void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const override { - std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and " - << formatter(keys_[1]) << std::endl; - } - - /// equals - bool equals(const DiscreteFactor& other, double tol) const override { - if(!dynamic_cast(&other)) - return false; - else { - const BinaryAllDiff& f(static_cast(other)); - return (cardinality0_==f.cardinality0_) && (cardinality1_==f.cardinality1_); - } - } - - /// Calculate value - double operator()(const Values& values) const override { - return (double) (values.at(keys_[0]) != values.at(keys_[1])); - } - - /// Convert into a decisiontree - DecisionTreeFactor toDecisionTreeFactor() const override { - DiscreteKeys keys; - keys.push_back(DiscreteKey(keys_[0],cardinality0_)); - keys.push_back(DiscreteKey(keys_[1],cardinality1_)); - std::vector table; - for (size_t i1 = 0; i1 < cardinality0_; i1++) - for (size_t i2 = 0; i2 < cardinality1_; i2++) - table.push_back(i1 != i2); - DecisionTreeFactor converted(keys, table); - return converted; - } - - /// Multiply into a decisiontree - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { - // TODO: can we do this more efficiently? 
- return toDecisionTreeFactor() * f; - } - - /* - * Ensure Arc-consistency - * @param j domain to be checked - * @param domains all other domains - */ - /// - bool ensureArcConsistency(size_t j, std::vector& domains) const override { -// throw std::runtime_error( -// "BinaryAllDiff::ensureArcConsistency not implemented"); + /// equals + bool equals(const DiscreteFactor& other, double tol) const override { + if (!dynamic_cast(&other)) return false; + else { + const BinaryAllDiff& f(static_cast(other)); + return (cardinality0_ == f.cardinality0_) && + (cardinality1_ == f.cardinality1_); } + } - /// Partially apply known values - Constraint::shared_ptr partiallyApply(const Values&) const override { - throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); - } + /// Calculate value + double operator()(const Values& values) const override { + return (double)(values.at(keys_[0]) != values.at(keys_[1])); + } - /// Partially apply known values, domain version - Constraint::shared_ptr partiallyApply( - const std::vector&) const override { - throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); - } - }; + /// Convert into a decisiontree + DecisionTreeFactor toDecisionTreeFactor() const override { + DiscreteKeys keys; + keys.push_back(DiscreteKey(keys_[0], cardinality0_)); + keys.push_back(DiscreteKey(keys_[1], cardinality1_)); + std::vector table; + for (size_t i1 = 0; i1 < cardinality0_; i1++) + for (size_t i2 = 0; i2 < cardinality1_; i2++) table.push_back(i1 != i2); + DecisionTreeFactor converted(keys, table); + return converted; + } -} // namespace gtsam + /// Multiply into a decisiontree + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { + // TODO: can we do this more efficiently? + return toDecisionTreeFactor() * f; + } + + /* + * Ensure Arc-consistency by checking every possible value of domain j. + * @param j domain to be checked + * @param (in/out) domains all domains, but only domains->at(j) will be checked. + * @return true if domains->at(j) was changed, false otherwise. 
+ */ + bool ensureArcConsistency(Key j, Domains* domains) const override { + throw std::runtime_error( + "BinaryAllDiff::ensureArcConsistency not implemented"); + return false; + } + + /// Partially apply known values + Constraint::shared_ptr partiallyApply(const Values&) const override { + throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); + } + + /// Partially apply known values, domain version + Constraint::shared_ptr partiallyApply( + const Domains&) const override { + throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); + } +}; + +} // namespace gtsam diff --git a/gtsam_unstable/discrete/CSP.cpp b/gtsam_unstable/discrete/CSP.cpp index 525abd098..1b38d20d6 100644 --- a/gtsam_unstable/discrete/CSP.cpp +++ b/gtsam_unstable/discrete/CSP.cpp @@ -5,99 +5,95 @@ * @author Frank Dellaert */ -#include -#include #include +#include +#include using namespace std; namespace gtsam { - /// Find the best total assignment - can be expensive - CSP::sharedValues CSP::optimalAssignment() const { - DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(); - sharedValues mpe = chordal->optimize(); - return mpe; - } +/// Find the best total assignment - can be expensive +CSP::Values CSP::optimalAssignment() const { + DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(); + return chordal->optimize(); +} - /// Find the best total assignment - can be expensive - CSP::sharedValues CSP::optimalAssignment(const Ordering& ordering) const { - DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering); - sharedValues mpe = chordal->optimize(); - return mpe; - } +/// Find the best total assignment - can be expensive +CSP::Values CSP::optimalAssignment(const Ordering& ordering) const { + DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering); + return chordal->optimize(); +} - void CSP::runArcConsistency(size_t cardinality, size_t nrIterations, bool print) const { - // Create VariableIndex - VariableIndex index(*this); - // index.print(); +bool CSP::runArcConsistency(const VariableIndex& index, + Domains* domains) const { + bool changed = false; - size_t n = index.size(); + // iterate over all variables in the index + for (auto entry : index) { + // Get the variable's key and associated factors: + const Key key = entry.first; + const FactorIndices& factors = entry.second; - // Initialize domains - std::vector < Domain > domains; - for (size_t j = 0; j < n; j++) - domains.push_back(Domain(DiscreteKey(j,cardinality))); + // If this domain is already a singleton, we do nothing. 
+ if (domains->at(key).isSingleton()) continue; - // Create array of flags indicating a domain changed or not - std::vector changed(n); - - // iterate nrIterations over entire grid - for (size_t it = 0; it < nrIterations; it++) { - bool anyChange = false; - // iterate over all cells - for (size_t v = 0; v < n; v++) { - // keep track of which domains changed - changed[v] = false; - // loop over all factors/constraints for variable v - const FactorIndices& factors = index[v]; - for(size_t f: factors) { - // if not already a singleton - if (!domains[v].isSingleton()) { - // get the constraint and call its ensureArcConsistency method - Constraint::shared_ptr constraint = boost::dynamic_pointer_cast((*this)[f]); - if (!constraint) throw runtime_error("CSP:runArcConsistency: non-constraint factor"); - changed[v] = constraint->ensureArcConsistency(v,domains) || changed[v]; - } - } // f - if (changed[v]) anyChange = true; - } // v - if (!anyChange) break; - // TODO: Sudoku specific hack - if (print) { - if (cardinality == 9 && n == 81) { - for (size_t i = 0, v = 0; i < (size_t)std::sqrt((double)n); i++) { - for (size_t j = 0; j < (size_t)std::sqrt((double)n); j++, v++) { - if (changed[v]) cout << "*"; - domains[v].print(); - cout << "\t"; - } // i - cout << endl; - } // j - } else { - for (size_t v = 0; v < n; v++) { - if (changed[v]) cout << "*"; - domains[v].print(); - cout << "\t"; - } // v - } - cout << endl; - } // print - } // it - -#ifndef INPROGRESS - // Now create new problem with all singleton variables removed - // We do this by adding simplifying all factors using parial application - // TODO: create a new ordering as we go, to ensure a connected graph - // KeyOrdering ordering; - // vector dkeys; - for(const DiscreteFactor::shared_ptr& f: factors_) { - Constraint::shared_ptr constraint = boost::dynamic_pointer_cast(f); - if (!constraint) throw runtime_error("CSP:runArcConsistency: non-constraint factor"); - Constraint::shared_ptr reduced = constraint->partiallyApply(domains); - if (print) reduced->print(); + // Otherwise, loop over all factors/constraints for variable with given key. + for (size_t f : factors) { + // If this factor is a constraint, call its ensureArcConsistency method: + auto constraint = boost::dynamic_pointer_cast((*this)[f]); + if (constraint) { + changed = constraint->ensureArcConsistency(key, domains) || changed; + } } -#endif } -} // gtsam + return changed; +} +// TODO(dellaert): This is AC1, which is inefficient as any change will cause +// the algorithm to revisit *all* variables again. Implement AC3. +Domains CSP::runArcConsistency(size_t cardinality, size_t maxIterations) const { + // Create VariableIndex + VariableIndex index(*this); + + // Initialize domains + Domains domains; + for (auto entry : index) { + const Key key = entry.first; + domains.emplace(key, DiscreteKey(key, cardinality)); + } + + // Iterate until convergence or not a single domain changed. + for (size_t it = 0; it < maxIterations; it++) { + bool changed = runArcConsistency(index, &domains); + if (!changed) break; + } + return domains; +} + +CSP CSP::partiallyApply(const Domains& domains) const { + // Create new problem with all singleton variables removed + // We do this by adding simplifying all factors using partial application. 
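  // [Editor's note -- clarifying example, not part of this patch] Concretely:
  // if arc consistency reduced key B to the singleton {2}, an AllDiff over
  // {A, B, C} comes back from partiallyApply as an AllDiff over {A, C} only,
  // while the tightened Domain factors added below keep the pruned value
  // sets, so no solutions of the original CSP are lost.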
+ // TODO: create a new ordering as we go, to ensure a connected graph + // KeyOrdering ordering; + // vector dkeys; + CSP new_csp; + + // Add tightened domains as new factors: + for (auto key_domain : domains) { + new_csp.emplace_shared(key_domain.second); + } + + // Reduce all existing factors: + for (const DiscreteFactor::shared_ptr& f : factors_) { + auto constraint = boost::dynamic_pointer_cast(f); + if (!constraint) + throw runtime_error("CSP:runArcConsistency: non-constraint factor"); + Constraint::shared_ptr reduced = constraint->partiallyApply(domains); + if (reduced->size() > 1) { + new_csp.push_back(reduced); + } + } + return new_csp; +} +} // namespace gtsam diff --git a/gtsam_unstable/discrete/CSP.h b/gtsam_unstable/discrete/CSP.h index 9e843f667..0d4e0b177 100644 --- a/gtsam_unstable/discrete/CSP.h +++ b/gtsam_unstable/discrete/CSP.h @@ -7,84 +7,78 @@ #pragma once +#include #include #include -#include namespace gtsam { - /** - * Constraint Satisfaction Problem class - * A specialization of a DiscreteFactorGraph. - * It knows about CSP-specific constraints and algorithms +/** + * Constraint Satisfaction Problem class + * A specialization of a DiscreteFactorGraph. + * It knows about CSP-specific constraints and algorithms + */ +class GTSAM_UNSTABLE_EXPORT CSP : public DiscreteFactorGraph { + public: + /** A map from keys to values */ + typedef Assignment Values; + + public: + /// Add a unary constraint, allowing only a single value + void addSingleValue(const DiscreteKey& dkey, size_t value) { + emplace_shared(dkey, value); + } + + /// Add a binary AllDiff constraint + void addAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) { + emplace_shared(key1, key2); + } + + /// Add a general AllDiff constraint + void addAllDiff(const DiscreteKeys& dkeys) { emplace_shared(dkeys); } + + // /** return product of all factors as a single factor */ + // DecisionTreeFactor product() const { + // DecisionTreeFactor result; + // for(const sharedFactor& factor: *this) + // if (factor) result = (*factor) * result; + // return result; + // } + + /// Find the best total assignment - can be expensive. + Values optimalAssignment() const; + + /// Find the best total assignment, with given ordering - can be expensive. + Values optimalAssignment(const Ordering& ordering) const; + + // /* + // * Perform loopy belief propagation + // * True belief propagation would check for each value in domain + // * whether any satisfying separator assignment can be found. + // * This corresponds to hyper-arc consistency in CSP speak. + // * This can be done by creating a mini-factor graph and search. + // * For a nine-by-nine Sudoku, the search tree will be 8+6+6=20 levels + // deep. + // * It will be very expensive to exclude values that way. + // */ + // void applyBeliefPropagation(size_t maxIterations = 10) const; + + /* + * Apply arc-consistency ~ Approximate loopy belief propagation + * We need to give the domains to a constraint, and it returns + * a domain whose values don't conflict in the arc-consistency way. + * TODO: should get cardinality from DiscreteKeys */ - class GTSAM_UNSTABLE_EXPORT CSP: public DiscreteFactorGraph { - public: + Domains runArcConsistency(size_t cardinality, + size_t maxIterations = 10) const; - /** A map from keys to values */ - typedef KeyVector Indices; - typedef Assignment Values; - typedef boost::shared_ptr sharedValues; + /// Run arc consistency for all variables, return true if any domain changed. 
+ bool runArcConsistency(const VariableIndex& index, Domains* domains) const; - public: - -// /// Constructor -// CSP() { -// } - - /// Add a unary constraint, allowing only a single value - void addSingleValue(const DiscreteKey& dkey, size_t value) { - boost::shared_ptr factor(new SingleValue(dkey, value)); - push_back(factor); - } - - /// Add a binary AllDiff constraint - void addAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) { - boost::shared_ptr factor( - new BinaryAllDiff(key1, key2)); - push_back(factor); - } - - /// Add a general AllDiff constraint - void addAllDiff(const DiscreteKeys& dkeys) { - boost::shared_ptr factor(new AllDiff(dkeys)); - push_back(factor); - } - -// /** return product of all factors as a single factor */ -// DecisionTreeFactor product() const { -// DecisionTreeFactor result; -// for(const sharedFactor& factor: *this) -// if (factor) result = (*factor) * result; -// return result; -// } - - /// Find the best total assignment - can be expensive - sharedValues optimalAssignment() const; - - /// Find the best total assignment - can be expensive - sharedValues optimalAssignment(const Ordering& ordering) const; - -// /* -// * Perform loopy belief propagation -// * True belief propagation would check for each value in domain -// * whether any satisfying separator assignment can be found. -// * This corresponds to hyper-arc consistency in CSP speak. -// * This can be done by creating a mini-factor graph and search. -// * For a nine-by-nine Sudoku, the search tree will be 8+6+6=20 levels deep. -// * It will be very expensive to exclude values that way. -// */ -// void applyBeliefPropagation(size_t nrIterations = 10) const; - - /* - * Apply arc-consistency ~ Approximate loopy belief propagation - * We need to give the domains to a constraint, and it returns - * a domain whose values don't conflict in the arc-consistency way. - * TODO: should get cardinality from Indices - */ - void runArcConsistency(size_t cardinality, size_t nrIterations = 10, - bool print = false) const; - }; // CSP - -} // gtsam + /* + * Create a new CSP, applying the given Domain constraints. + */ + CSP partiallyApply(const Domains& domains) const; +}; // CSP +} // namespace gtsam diff --git a/gtsam_unstable/discrete/Constraint.h b/gtsam_unstable/discrete/Constraint.h index c3a26de68..f0e51b723 100644 --- a/gtsam_unstable/discrete/Constraint.h +++ b/gtsam_unstable/discrete/Constraint.h @@ -17,77 +17,70 @@ #pragma once -#include #include +#include + #include +#include namespace gtsam { - class Domain; +class Domain; +using Domains = std::map; - /** - * Base class for discrete probabilistic factors - * The most general one is the derived DecisionTreeFactor +/** + * Base class for constraint factors + * Derived classes include SingleValue, BinaryAllDiff, and AllDiff. + */ +class GTSAM_EXPORT Constraint : public DiscreteFactor { + public: + typedef boost::shared_ptr shared_ptr; + + protected: + /// Construct unary constraint factor. + Constraint(Key j) : DiscreteFactor(boost::assign::cref_list_of<1>(j)) {} + + /// Construct binary constraint factor. + Constraint(Key j1, Key j2) + : DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) {} + + /// Construct n-way constraint factor. 
+ Constraint(const KeyVector& js) : DiscreteFactor(js) {} + + /// construct from container + template + Constraint(KeyIterator beginKey, KeyIterator endKey) + : DiscreteFactor(beginKey, endKey) {} + + public: + /// @name Standard Constructors + /// @{ + + /// Default constructor for I/O + Constraint(); + + /// Virtual destructor + ~Constraint() override {} + + /// @} + /// @name Standard Interface + /// @{ + + /* + * Ensure Arc-consistency by checking every possible value of domain j. + * @param j domain to be checked + * @param (in/out) domains all domains, but only domains->at(j) will be checked. + * @return true if domains->at(j) was changed, false otherwise. */ - class Constraint : public DiscreteFactor { + virtual bool ensureArcConsistency(Key j, Domains* domains) const = 0; - public: + /// Partially apply known values + virtual shared_ptr partiallyApply(const Values&) const = 0; - typedef boost::shared_ptr shared_ptr; - - protected: - - /// Construct n-way factor - Constraint(const KeyVector& js) : - DiscreteFactor(js) { - } - - /// Construct unary factor - Constraint(Key j) : - DiscreteFactor(boost::assign::cref_list_of<1>(j)) { - } - - /// Construct binary factor - Constraint(Key j1, Key j2) : - DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) { - } - - /// construct from container - template - Constraint(KeyIterator beginKey, KeyIterator endKey) : - DiscreteFactor(beginKey, endKey) { - } - - public: - - /// @name Standard Constructors - /// @{ - - /// Default constructor for I/O - Constraint(); - - /// Virtual destructor - ~Constraint() override {} - - /// @} - /// @name Standard Interface - /// @{ - - /* - * Ensure Arc-consistency - * @param j domain to be checked - * @param domains all other domains - */ - virtual bool ensureArcConsistency(size_t j, std::vector& domains) const = 0; - - /// Partially apply known values - virtual shared_ptr partiallyApply(const Values&) const = 0; - - - /// Partially apply known values, domain version - virtual shared_ptr partiallyApply(const std::vector&) const = 0; - /// @} - }; + /// Partially apply known values, domain version + virtual shared_ptr partiallyApply(const Domains&) const = 0; + /// @} +}; // DiscreteFactor -}// namespace gtsam +} // namespace gtsam diff --git a/gtsam_unstable/discrete/Domain.cpp b/gtsam_unstable/discrete/Domain.cpp index 740ef067c..98b735c6c 100644 --- a/gtsam_unstable/discrete/Domain.cpp +++ b/gtsam_unstable/discrete/Domain.cpp @@ -5,92 +5,94 @@ * @author Frank Dellaert */ -#include -#include #include -#include +#include +#include +#include +#include namespace gtsam { - using namespace std; - - /* ************************************************************************* */ - void Domain::print(const string& s, - const KeyFormatter& formatter) const { -// cout << s << ": Domain on " << formatter(keys_[0]) << " (j=" << -// formatter(keys_[0]) << ") with values"; -// for (size_t v: values_) cout << " " << v; -// cout << endl; - for (size_t v: values_) cout << v; - } - - /* ************************************************************************* */ - double Domain::operator()(const Values& values) const { - return contains(values.at(keys_[0])); - } - - /* ************************************************************************* */ - DecisionTreeFactor Domain::toDecisionTreeFactor() const { - DiscreteKeys keys; - keys += DiscreteKey(keys_[0],cardinality_); - vector table; - for (size_t i1 = 0; i1 < cardinality_; ++i1) - table.push_back(contains(i1)); - DecisionTreeFactor converted(keys, table); - return 
converted; - } - - /* ************************************************************************* */ - DecisionTreeFactor Domain::operator*(const DecisionTreeFactor& f) const { - // TODO: can we do this more efficiently? - return toDecisionTreeFactor() * f; - } - - /* ************************************************************************* */ - bool Domain::ensureArcConsistency(size_t j, vector& domains) const { - if (j != keys_[0]) throw invalid_argument("Domain check on wrong domain"); - Domain& D = domains[j]; - for(size_t value: values_) - if (!D.contains(value)) throw runtime_error("Unsatisfiable"); - D = *this; - return true; - } - - /* ************************************************************************* */ - bool Domain::checkAllDiff(const KeyVector keys, vector& domains) { - Key j = keys_[0]; - // for all values in this domain - for(size_t value: values_) { - // for all connected domains - for(Key k: keys) - // if any domain contains the value we cannot make this domain singleton - if (k!=j && domains[k].contains(value)) - goto found; - values_.clear(); - values_.insert(value); - return true; // we changed it - found:; - } - return false; // we did not change it - } - - /* ************************************************************************* */ - Constraint::shared_ptr Domain::partiallyApply( - const Values& values) const { - Values::const_iterator it = values.find(keys_[0]); - if (it != values.end() && !contains(it->second)) throw runtime_error( - "Domain::partiallyApply: unsatisfiable"); - return boost::make_shared < Domain > (*this); - } - - /* ************************************************************************* */ - Constraint::shared_ptr Domain::partiallyApply( - const vector& domains) const { - const Domain& Dk = domains[keys_[0]]; - if (Dk.isSingleton() && !contains(*Dk.begin())) throw runtime_error( - "Domain::partiallyApply: unsatisfiable"); - return boost::make_shared < Domain > (Dk); - } +using namespace std; /* ************************************************************************* */ -} // namespace gtsam +void Domain::print(const string& s, const KeyFormatter& formatter) const { + cout << s << ": Domain on " << formatter(key()) << " (j=" << formatter(key()) + << ") with values"; + for (size_t v : values_) cout << " " << v; + cout << endl; +} + +/* ************************************************************************* */ +string Domain::base1Str() const { + stringstream ss; + for (size_t v : values_) ss << v + 1; + return ss.str(); +} + +/* ************************************************************************* */ +double Domain::operator()(const Values& values) const { + return contains(values.at(key())); +} + +/* ************************************************************************* */ +DecisionTreeFactor Domain::toDecisionTreeFactor() const { + DiscreteKeys keys; + keys += DiscreteKey(key(), cardinality_); + vector table; + for (size_t i1 = 0; i1 < cardinality_; ++i1) table.push_back(contains(i1)); + DecisionTreeFactor converted(keys, table); + return converted; +} + +/* ************************************************************************* */ +DecisionTreeFactor Domain::operator*(const DecisionTreeFactor& f) const { + // TODO: can we do this more efficiently? 
+ return toDecisionTreeFactor() * f; +} + +/* ************************************************************************* */ +bool Domain::ensureArcConsistency(Key j, Domains* domains) const { + if (j != key()) throw invalid_argument("Domain check on wrong domain"); + Domain& D = domains->at(j); + for (size_t value : values_) + if (!D.contains(value)) throw runtime_error("Unsatisfiable"); + D = *this; + return true; +} + +/* ************************************************************************* */ +boost::optional Domain::checkAllDiff(const KeyVector keys, + const Domains& domains) const { + Key j = key(); + // for all values in this domain + for (const size_t value : values_) { + // for all connected domains + for (const Key k : keys) + // if any domain contains the value we cannot make this domain singleton + if (k != j && domains.at(k).contains(value)) goto found; + // Otherwise: return a singleton: + return Domain(this->discreteKey(), value); + found:; + } + return boost::none; // we did not change it +} + +/* ************************************************************************* */ +Constraint::shared_ptr Domain::partiallyApply(const Values& values) const { + Values::const_iterator it = values.find(key()); + if (it != values.end() && !contains(it->second)) + throw runtime_error("Domain::partiallyApply: unsatisfiable"); + return boost::make_shared(*this); +} + +/* ************************************************************************* */ +Constraint::shared_ptr Domain::partiallyApply(const Domains& domains) const { + const Domain& Dk = domains.at(key()); + if (Dk.isSingleton() && !contains(*Dk.begin())) + throw runtime_error("Domain::partiallyApply: unsatisfiable"); + return boost::make_shared(Dk); +} + +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam_unstable/discrete/Domain.h b/gtsam_unstable/discrete/Domain.h index 5acc5a08f..ae137ca33 100644 --- a/gtsam_unstable/discrete/Domain.h +++ b/gtsam_unstable/discrete/Domain.h @@ -7,111 +7,107 @@ #pragma once -#include #include +#include namespace gtsam { - /** - * Domain restriction constraint +/** + * The Domain class represents a constraint that restricts the possible values a + * particular variable, with given key, can take on. 
+ */
+class GTSAM_UNSTABLE_EXPORT Domain : public Constraint {
+  size_t cardinality_; /// Cardinality
+  std::set<size_t> values_; /// allowed values
+
+ public:
+  typedef boost::shared_ptr<Domain> shared_ptr;
+
+  // Constructor on Discrete Key initializes an "all-allowed" domain
+  Domain(const DiscreteKey& dkey)
+      : Constraint(dkey.first), cardinality_(dkey.second) {
+    for (size_t v = 0; v < cardinality_; v++) values_.insert(v);
+  }
+
+  // Constructor on Discrete Key with single allowed value
+  // Consider SingleValue constraint
+  Domain(const DiscreteKey& dkey, size_t v)
+      : Constraint(dkey.first), cardinality_(dkey.second) {
+    values_.insert(v);
+  }
+
+  /// The one key
+  Key key() const { return keys_[0]; }
+
+  // The associated discrete key
+  DiscreteKey discreteKey() const { return DiscreteKey(key(), cardinality_); }
+
+  /// Insert a value, non const :-(
+  void insert(size_t value) { values_.insert(value); }
+
+  /// Erase a value, non const :-(
+  void erase(size_t value) { values_.erase(value); }
+
+  size_t nrValues() const { return values_.size(); }
+
+  bool isSingleton() const { return nrValues() == 1; }
+
+  size_t firstValue() const { return *values_.begin(); }
+
+  // print
+  void print(const std::string& s = "", const KeyFormatter& formatter =
+                                            DefaultKeyFormatter) const override;
+
+  /// equals
+  bool equals(const DiscreteFactor& other, double tol) const override {
+    if (!dynamic_cast<const Domain*>(&other))
+      return false;
+    else {
+      const Domain& f(static_cast<const Domain&>(other));
+      return (cardinality_ == f.cardinality_) && (values_ == f.values_);
+    }
+  }
+
+  // Return concise string representation, mostly to debug arc consistency.
+  // Converts from base 0 to base1.
+  std::string base1Str() const;
+
+  // Check whether domain contains a specific value.
+  bool contains(size_t value) const { return values_.count(value) > 0; }
+
+  /// Calculate value
+  double operator()(const Values& values) const override;
+
+  /// Convert into a decisiontree
+  DecisionTreeFactor toDecisionTreeFactor() const override;
+
+  /// Multiply into a decisiontree
+  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override;
+
+  /*
+   * Ensure Arc-consistency by checking every possible value of domain j.
+   * @param j domain to be checked
+   * @param (in/out) domains all domains, but only domains->at(j) will be
+   * checked.
+   * @return true if domains->at(j) was changed, false otherwise.
   */
-  class GTSAM_UNSTABLE_EXPORT Domain: public Constraint {
+  bool ensureArcConsistency(Key j, Domains* domains) const override;
-    size_t cardinality_; /// Cardinality
-    std::set<size_t> values_; /// allowed values
+  /**
+   * Check for a value in domain that does not occur in any other connected
+   * domain. If found, return a new singleton domain...
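   * (For instance, if this domain is {1, 2} and value 2 occurs in none of the
   * connected domains, the result is the singleton domain {2}.)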
+ * Called in AllDiff::ensureArcConsistency + * @param keys connected domains through alldiff + * @param keys other domains + */ + boost::optional checkAllDiff(const KeyVector keys, + const Domains& domains) const; - public: + /// Partially apply known values + Constraint::shared_ptr partiallyApply(const Values& values) const override; - typedef boost::shared_ptr shared_ptr; + /// Partially apply known values, domain version + Constraint::shared_ptr partiallyApply(const Domains& domains) const override; +}; - // Constructor on Discrete Key initializes an "all-allowed" domain - Domain(const DiscreteKey& dkey) : - Constraint(dkey.first), cardinality_(dkey.second) { - for (size_t v = 0; v < cardinality_; v++) - values_.insert(v); - } - - // Constructor on Discrete Key with single allowed value - // Consider SingleValue constraint - Domain(const DiscreteKey& dkey, size_t v) : - Constraint(dkey.first), cardinality_(dkey.second) { - values_.insert(v); - } - - /// Constructor - Domain(const Domain& other) : - Constraint(other.keys_[0]), values_(other.values_) { - } - - /// insert a value, non const :-( - void insert(size_t value) { - values_.insert(value); - } - - /// erase a value, non const :-( - void erase(size_t value) { - values_.erase(value); - } - - size_t nrValues() const { - return values_.size(); - } - - bool isSingleton() const { - return nrValues() == 1; - } - - size_t firstValue() const { - return *values_.begin(); - } - - // print - void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const override; - - /// equals - bool equals(const DiscreteFactor& other, double tol) const override { - if(!dynamic_cast(&other)) - return false; - else { - const Domain& f(static_cast(other)); - return (cardinality_==f.cardinality_) && (values_==f.values_); - } - } - - bool contains(size_t value) const { - return values_.count(value)>0; - } - - /// Calculate value - double operator()(const Values& values) const override; - - /// Convert into a decisiontree - DecisionTreeFactor toDecisionTreeFactor() const override; - - /// Multiply into a decisiontree - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; - - /* - * Ensure Arc-consistency - * @param j domain to be checked - * @param domains all other domains - */ - bool ensureArcConsistency(size_t j, std::vector& domains) const override; - - /** - * Check for a value in domain that does not occur in any other connected domain. - * If found, we make this a singleton... 
Called in AllDiff::ensureArcConsistency - * @param keys connected domains through alldiff - */ - bool checkAllDiff(const KeyVector keys, std::vector& domains); - - /// Partially apply known values - Constraint::shared_ptr partiallyApply(const Values& values) const override; - - /// Partially apply known values, domain version - Constraint::shared_ptr partiallyApply( - const std::vector& domains) const override; - }; - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam_unstable/discrete/Scheduler.cpp b/gtsam_unstable/discrete/Scheduler.cpp index 3273778c4..6210f8037 100644 --- a/gtsam_unstable/discrete/Scheduler.cpp +++ b/gtsam_unstable/discrete/Scheduler.cpp @@ -5,298 +5,285 @@ * @author Frank Dellaert */ -#include -#include #include #include +#include +#include #include - +#include #include #include -#include namespace gtsam { - using namespace std; +using namespace std; - Scheduler::Scheduler(size_t maxNrStudents, const string& filename): - maxNrStudents_(maxNrStudents) - { - typedef boost::tokenizer > Tokenizer; +Scheduler::Scheduler(size_t maxNrStudents, const string& filename) + : maxNrStudents_(maxNrStudents) { + typedef boost::tokenizer > Tokenizer; - // open file - ifstream is(filename.c_str()); - if (!is) { - cerr << "Scheduler: could not open file " << filename << endl; - throw runtime_error("Scheduler: could not open file " + filename); - } - - string line; // buffer - - // process first line with faculty - if (getline(is, line, '\r')) { - Tokenizer tok(line); - Tokenizer::iterator it = tok.begin(); - for (++it; it != tok.end(); ++it) - addFaculty(*it); - } - - // for all remaining lines - size_t count = 0; - while (getline(is, line, '\r')) { - if (count++ > 100) throw runtime_error("reached 100 lines, exiting"); - Tokenizer tok(line); - Tokenizer::iterator it = tok.begin(); - addSlot(*it++); // add slot - // add availability - for (; it != tok.end(); ++it) - available_ += (it->empty()) ? "0 " : "1 "; - available_ += '\n'; - } - } // constructor - - /** addStudent has to be called after adding slots and faculty */ - void Scheduler::addStudent(const string& studentName, - const string& area1, const string& area2, - const string& area3, const string& advisor) { - assert(nrStudents() area) const { - return area ? students_[s].keys_[*area] : students_[s].key_; + // open file + ifstream is(filename.c_str()); + if (!is) { + cerr << "Scheduler: could not open file " << filename << endl; + throw runtime_error("Scheduler: could not open file " + filename); } - const string& Scheduler::studentName(size_t i) const { - assert(i 100) throw runtime_error("reached 100 lines, exiting"); + Tokenizer tok(line); + Tokenizer::iterator it = tok.begin(); + addSlot(*it++); // add slot + // add availability + for (; it != tok.end(); ++it) available_ += (it->empty()) ? 
"0 " : "1 "; + available_ += '\n'; + } +} // constructor + +/** addStudent has to be called after adding slots and faculty */ +void Scheduler::addStudent(const string& studentName, const string& area1, + const string& area2, const string& area3, + const string& advisor) { + assert(nrStudents() < maxNrStudents_); + assert(facultyInArea_.count(area1)); + assert(facultyInArea_.count(area2)); + assert(facultyInArea_.count(area3)); + size_t advisorIndex = facultyIndex_[advisor]; + Student student(nrFaculty(), advisorIndex); + student.name_ = studentName; + // We fix the ordering by assigning a higher index to the student + // and numbering the areas lower + Key j = 3 * maxNrStudents_ + nrStudents(); + student.key_ = DiscreteKey(j, nrTimeSlots()); + Key base = 3 * nrStudents(); + student.keys_[0] = DiscreteKey(base + 0, nrFaculty()); + student.keys_[1] = DiscreteKey(base + 1, nrFaculty()); + student.keys_[2] = DiscreteKey(base + 2, nrFaculty()); + student.areaName_[0] = area1; + student.areaName_[1] = area2; + student.areaName_[2] = area3; + students_.push_back(student); +} + +/** get key for student and area, 0 is time slot itself */ +const DiscreteKey& Scheduler::key(size_t s, + boost::optional area) const { + return area ? students_[s].keys_[*area] : students_[s].key_; +} + +const string& Scheduler::studentName(size_t i) const { + assert(i < nrStudents()); + return students_[i].name_; +} + +const DiscreteKey& Scheduler::studentKey(size_t i) const { + assert(i < nrStudents()); + return students_[i].key_; +} + +const string& Scheduler::studentArea(size_t i, size_t area) const { + assert(i < nrStudents()); + return students_[i].areaName_[area]; +} + +/** Add student-specific constraints to the graph */ +void Scheduler::addStudentSpecificConstraints(size_t i, + boost::optional slot) { + bool debug = ISDEBUG("Scheduler::buildGraph"); + + assert(i < nrStudents()); + const Student& s = students_[i]; + + if (!slot && !slotsAvailable_.empty()) { + if (debug) cout << "Adding availability of slots" << endl; + assert(slotsAvailable_.size() == s.key_.second); + CSP::add(s.key_, slotsAvailable_); } - const string& Scheduler::studentArea(size_t i, size_t area) const { - assert(i slot) { - bool debug = ISDEBUG("Scheduler::buildGraph"); + if (debug) cout << "Area constraints " << areaName << endl; + assert(facultyInArea_[areaName].size() == areaKey.second); + CSP::add(areaKey, facultyInArea_[areaName]); - assert(iat(j); - cout << studentName(s) << " slot: " << slotName_[slot] << endl; - Key base = 3*s; - for (size_t area = 0; area < 3; area++) { - size_t faculty = assignment->at(base+area); - cout << setw(12) << studentArea(s,area) << ": " << facultyName_[faculty] - << endl; - } - cout << endl; - } } +} // buildGraph - /** Special print for single-student case */ - void Scheduler::printSpecial(sharedValues assignment) const { - Values::const_iterator it = assignment->begin(); - for (size_t area = 0; area < 3; area++, it++) { - size_t f = it->second; - cout << setw(12) << studentArea(0,area) << ": " << facultyName_[f] << endl; +/** print */ +void Scheduler::print(const string& s, const KeyFormatter& formatter) const { + cout << s << " Faculty:" << endl; + for (const string& name : facultyName_) cout << name << '\n'; + cout << endl; + + cout << s << " Slots:\n"; + size_t i = 0; + for (const string& name : slotName_) cout << i++ << " " << name << endl; + cout << endl; + + cout << "Availability:\n" << available_ << '\n'; + + cout << s << " Area constraints:\n"; + for (const FacultyInArea::value_type& it : 
facultyInArea_) { + cout << setw(12) << it.first << ": "; + for (double v : it.second) cout << v << " "; + cout << '\n'; + } + cout << endl; + + cout << s << " Students:\n"; + for (const Student& student : students_) student.print(); + cout << endl; + + CSP::print(s + " Factor graph"); + cout << endl; +} // print + +/** Print readable form of assignment */ +void Scheduler::printAssignment(const Values& assignment) const { + // Not intended to be general! Assumes very particular ordering ! + cout << endl; + for (size_t s = 0; s < nrStudents(); s++) { + Key j = 3 * maxNrStudents_ + s; + size_t slot = assignment.at(j); + cout << studentName(s) << " slot: " << slotName_[slot] << endl; + Key base = 3 * s; + for (size_t area = 0; area < 3; area++) { + size_t faculty = assignment.at(base + area); + cout << setw(12) << studentArea(s, area) << ": " << facultyName_[faculty] + << endl; } cout << endl; } +} - /** Accumulate faculty stats */ - void Scheduler::accumulateStats(sharedValues assignment, vector< - size_t>& stats) const { - for (size_t s = 0; s < nrStudents(); s++) { - Key base = 3*s; - for (size_t area = 0; area < 3; area++) { - size_t f = assignment->at(base+area); - assert(fsecond; + cout << setw(12) << studentArea(0, area) << ": " << facultyName_[f] << endl; + } + cout << endl; +} + +/** Accumulate faculty stats */ +void Scheduler::accumulateStats(const Values& assignment, + vector& stats) const { + for (size_t s = 0; s < nrStudents(); s++) { + Key base = 3 * s; + for (size_t area = 0; area < 3; area++) { + size_t f = assignment.at(base + area); + assert(f < stats.size()); + stats[f]++; + } // area + } // s +} + +/** Eliminate, return a Bayes net */ +DiscreteBayesNet::shared_ptr Scheduler::eliminate() const { + gttic(my_eliminate); + // TODO: fix this!! + size_t maxKey = keys().size(); + Ordering defaultKeyOrdering; + for (size_t i = 0; i < maxKey; ++i) defaultKeyOrdering += Key(i); + DiscreteBayesNet::shared_ptr chordal = + this->eliminateSequential(defaultKeyOrdering); + gttoc(my_eliminate); + return chordal; +} + +/** Find the best total assignment - can be expensive */ +Scheduler::Values Scheduler::optimalAssignment() const { + DiscreteBayesNet::shared_ptr chordal = eliminate(); + + if (ISDEBUG("Scheduler::optimalAssignment")) { + DiscreteBayesNet::const_iterator it = chordal->end() - 1; + const Student& student = students_.front(); + cout << endl; + (*it)->print(student.name_); } - /** Eliminate, return a Bayes net */ - DiscreteBayesNet::shared_ptr Scheduler::eliminate() const { - gttic(my_eliminate); - // TODO: fix this!! 
- size_t maxKey = keys().size(); - Ordering defaultKeyOrdering; - for (size_t i = 0; ieliminateSequential(defaultKeyOrdering); - gttoc(my_eliminate); - return chordal; - } + gttic(my_optimize); + Values mpe = chordal->optimize(); + gttoc(my_optimize); + return mpe; +} - /** Find the best total assignment - can be expensive */ - Scheduler::sharedValues Scheduler::optimalAssignment() const { - DiscreteBayesNet::shared_ptr chordal = eliminate(); - - if (ISDEBUG("Scheduler::optimalAssignment")) { - DiscreteBayesNet::const_iterator it = chordal->end()-1; - const Student & student = students_.front(); - cout << endl; - (*it)->print(student.name_); - } - - gttic(my_optimize); - sharedValues mpe = chordal->optimize(); - gttoc(my_optimize); - return mpe; - } - - /** find the assignment of students to slots with most possible committees */ - Scheduler::sharedValues Scheduler::bestSchedule() const { - sharedValues best; - throw runtime_error("bestSchedule not implemented"); - return best; - } - - /** find the corresponding most desirable committee assignment */ - Scheduler::sharedValues Scheduler::bestAssignment( - sharedValues bestSchedule) const { - sharedValues best; - throw runtime_error("bestAssignment not implemented"); - return best; - } - -} // gtsam +/** find the assignment of students to slots with most possible committees */ +Scheduler::Values Scheduler::bestSchedule() const { + Values best; + throw runtime_error("bestSchedule not implemented"); + return best; +} +/** find the corresponding most desirable committee assignment */ +Scheduler::Values Scheduler::bestAssignment(const Values& bestSchedule) const { + Values best; + throw runtime_error("bestAssignment not implemented"); + return best; +} +} // namespace gtsam diff --git a/gtsam_unstable/discrete/Scheduler.h b/gtsam_unstable/discrete/Scheduler.h index 6faf9956f..08e866efd 100644 --- a/gtsam_unstable/discrete/Scheduler.h +++ b/gtsam_unstable/discrete/Scheduler.h @@ -11,165 +11,150 @@ namespace gtsam { +/** + * Scheduler class + * Creates one variable for each student, and three variables for each + * of the student's areas, for a total of 4*nrStudents variables. + * The "student" variable will determine when the student takes the qual. + * The "area" variables determine which faculty are on his/her committee. 
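 * Key layout (as assigned in addStudent in Scheduler.cpp): the three area
 * keys of student s are 3*s + {0, 1, 2}, and the student's own slot key is
 * 3*maxNrStudents + s. For example, with maxNrStudents = 2, student 0 uses
 * area keys 0, 1, 2 and slot key 6, while student 1 uses 3, 4, 5 and 7.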
+ */ +class GTSAM_UNSTABLE_EXPORT Scheduler : public CSP { + private: + /** Internal data structure for students */ + struct Student { + std::string name_; + DiscreteKey key_; // key for student + std::vector keys_; // key for areas + std::vector areaName_; + std::vector advisor_; + Student(size_t nrFaculty, size_t advisorIndex) + : keys_(3), areaName_(3), advisor_(nrFaculty, 1.0) { + advisor_[advisorIndex] = 0.0; + } + void print() const { + using std::cout; + cout << name_ << ": "; + for (size_t area = 0; area < 3; area++) cout << areaName_[area] << " "; + cout << std::endl; + } + }; + + /** Maximum number of students */ + size_t maxNrStudents_; + + /** discrete keys, indexed by student and area index */ + std::vector students_; + + /** faculty identifiers */ + std::map facultyIndex_; + std::vector facultyName_, slotName_, areaName_; + + /** area constraints */ + typedef std::map > FacultyInArea; + FacultyInArea facultyInArea_; + + /** nrTimeSlots * nrFaculty availability constraints */ + std::string available_; + + /** which slots are good */ + std::vector slotsAvailable_; + + public: /** - * Scheduler class - * Creates one variable for each student, and three variables for each - * of the student's areas, for a total of 4*nrStudents variables. - * The "student" variable will determine when the student takes the qual. - * The "area" variables determine which faculty are on his/her committee. + * Constructor + * We need to know the number of students in advance for ordering keys. + * then add faculty, slots, areas, availability, students, in that order */ - class GTSAM_UNSTABLE_EXPORT Scheduler : public CSP { + Scheduler(size_t maxNrStudents) : maxNrStudents_(maxNrStudents) {} - private: + /// Destructor + virtual ~Scheduler() {} - /** Internal data structure for students */ - struct Student { - std::string name_; - DiscreteKey key_; // key for student - std::vector keys_; // key for areas - std::vector areaName_; - std::vector advisor_; - Student(size_t nrFaculty, size_t advisorIndex) : - keys_(3), areaName_(3), advisor_(nrFaculty, 1.0) { - advisor_[advisorIndex] = 0.0; - } - void print() const { - using std::cout; - cout << name_ << ": "; - for (size_t area = 0; area < 3; area++) - cout << areaName_[area] << " "; - cout << std::endl; - } - }; + void addFaculty(const std::string& facultyName) { + facultyIndex_[facultyName] = nrFaculty(); + facultyName_.push_back(facultyName); + } - /** Maximum number of students */ - size_t maxNrStudents_; + size_t nrFaculty() const { return facultyName_.size(); } - /** discrete keys, indexed by student and area index */ - std::vector students_; + /** boolean std::string of nrTimeSlots * nrFaculty */ + void setAvailability(const std::string& available) { available_ = available; } - /** faculty identifiers */ - std::map facultyIndex_; - std::vector facultyName_, slotName_, areaName_; + void addSlot(const std::string& slotName) { slotName_.push_back(slotName); } - /** area constraints */ - typedef std::map > FacultyInArea; - FacultyInArea facultyInArea_; + size_t nrTimeSlots() const { return slotName_.size(); } - /** nrTimeSlots * nrFaculty availability constraints */ - std::string available_; + const std::string& slotName(size_t s) const { return slotName_[s]; } - /** which slots are good */ - std::vector slotsAvailable_; + /** slots available, boolean */ + void setSlotsAvailable(const std::vector& slotsAvailable) { + slotsAvailable_ = slotsAvailable; + } - public: + void addArea(const std::string& facultyName, const std::string& areaName) { + 
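    // Record the area name and mark this faculty member as eligible (1) in
    // the area's 0/1 table of length nrFaculty().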
areaName_.push_back(areaName); + std::vector& table = + facultyInArea_[areaName]; // will create if needed + if (table.empty()) table.resize(nrFaculty(), 0); + table[facultyIndex_[facultyName]] = 1; + } - /** - * Constructor - * We need to know the number of students in advance for ordering keys. - * then add faculty, slots, areas, availability, students, in that order - */ - Scheduler(size_t maxNrStudents) : maxNrStudents_(maxNrStudents) {} + /** + * Constructor that reads in faculty, slots, availibility. + * Still need to add areas and students after this + */ + Scheduler(size_t maxNrStudents, const std::string& filename); - /// Destructor - virtual ~Scheduler() {} + /** get key for student and area, 0 is time slot itself */ + const DiscreteKey& key(size_t s, + boost::optional area = boost::none) const; - void addFaculty(const std::string& facultyName) { - facultyIndex_[facultyName] = nrFaculty(); - facultyName_.push_back(facultyName); - } + /** addStudent has to be called after adding slots and faculty */ + void addStudent(const std::string& studentName, const std::string& area1, + const std::string& area2, const std::string& area3, + const std::string& advisor); - size_t nrFaculty() const { - return facultyName_.size(); - } + /// current number of students + size_t nrStudents() const { return students_.size(); } - /** boolean std::string of nrTimeSlots * nrFaculty */ - void setAvailability(const std::string& available) { - available_ = available; - } + const std::string& studentName(size_t i) const; + const DiscreteKey& studentKey(size_t i) const; + const std::string& studentArea(size_t i, size_t area) const; - void addSlot(const std::string& slotName) { - slotName_.push_back(slotName); - } + /** Add student-specific constraints to the graph */ + void addStudentSpecificConstraints( + size_t i, boost::optional slot = boost::none); - size_t nrTimeSlots() const { - return slotName_.size(); - } + /** Main routine that builds factor graph */ + void buildGraph(size_t mutexBound = 7); - const std::string& slotName(size_t s) const { - return slotName_[s]; - } + /** print */ + void print( + const std::string& s = "Scheduler", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; - /** slots available, boolean */ - void setSlotsAvailable(const std::vector& slotsAvailable) { - slotsAvailable_ = slotsAvailable; - } + /** Print readable form of assignment */ + void printAssignment(const Values& assignment) const; - void addArea(const std::string& facultyName, const std::string& areaName) { - areaName_.push_back(areaName); - std::vector& table = facultyInArea_[areaName]; // will create if needed - if (table.empty()) table.resize(nrFaculty(), 0); - table[facultyIndex_[facultyName]] = 1; - } + /** Special print for single-student case */ + void printSpecial(const Values& assignment) const; - /** - * Constructor that reads in faculty, slots, availibility. 
- * Still need to add areas and students after this - */ - Scheduler(size_t maxNrStudents, const std::string& filename); + /** Accumulate faculty stats */ + void accumulateStats(const Values& assignment, + std::vector& stats) const; - /** get key for student and area, 0 is time slot itself */ - const DiscreteKey& key(size_t s, boost::optional area = boost::none) const; + /** Eliminate, return a Bayes net */ + DiscreteBayesNet::shared_ptr eliminate() const; - /** addStudent has to be called after adding slots and faculty */ - void addStudent(const std::string& studentName, const std::string& area1, - const std::string& area2, const std::string& area3, - const std::string& advisor); + /** Find the best total assignment - can be expensive */ + Values optimalAssignment() const; - /// current number of students - size_t nrStudents() const { - return students_.size(); - } + /** find the assignment of students to slots with most possible committees */ + Values bestSchedule() const; - const std::string& studentName(size_t i) const; - const DiscreteKey& studentKey(size_t i) const; - const std::string& studentArea(size_t i, size_t area) const; - - /** Add student-specific constraints to the graph */ - void addStudentSpecificConstraints(size_t i, boost::optional slot = boost::none); - - /** Main routine that builds factor graph */ - void buildGraph(size_t mutexBound = 7); - - /** print */ - void print( - const std::string& s = "Scheduler", - const KeyFormatter& formatter = DefaultKeyFormatter) const override; - - /** Print readable form of assignment */ - void printAssignment(sharedValues assignment) const; - - /** Special print for single-student case */ - void printSpecial(sharedValues assignment) const; - - /** Accumulate faculty stats */ - void accumulateStats(sharedValues assignment, - std::vector& stats) const; - - /** Eliminate, return a Bayes net */ - DiscreteBayesNet::shared_ptr eliminate() const; - - /** Find the best total assignment - can be expensive */ - sharedValues optimalAssignment() const; - - /** find the assignment of students to slots with most possible committees */ - sharedValues bestSchedule() const; - - /** find the corresponding most desirable committee assignment */ - sharedValues bestAssignment(sharedValues bestSchedule) const; - - }; // Scheduler - -} // gtsam + /** find the corresponding most desirable committee assignment */ + Values bestAssignment(const Values& bestSchedule) const; +}; // Scheduler +} // namespace gtsam diff --git a/gtsam_unstable/discrete/SingleValue.cpp b/gtsam_unstable/discrete/SingleValue.cpp index 6324f14cd..162e21512 100644 --- a/gtsam_unstable/discrete/SingleValue.cpp +++ b/gtsam_unstable/discrete/SingleValue.cpp @@ -5,75 +5,73 @@ * @author Frank Dellaert */ -#include -#include -#include #include +#include +#include +#include + #include namespace gtsam { - using namespace std; - - /* ************************************************************************* */ - void SingleValue::print(const string& s, - const KeyFormatter& formatter) const { - cout << s << "SingleValue on " << "j=" << formatter(keys_[0]) - << " with value " << value_ << endl; - } - - /* ************************************************************************* */ - double SingleValue::operator()(const Values& values) const { - return (double) (values.at(keys_[0]) == value_); - } - - /* ************************************************************************* */ - DecisionTreeFactor SingleValue::toDecisionTreeFactor() const { - DiscreteKeys keys; - keys += 
DiscreteKey(keys_[0],cardinality_); - vector table; - for (size_t i1 = 0; i1 < cardinality_; i1++) - table.push_back(i1 == value_); - DecisionTreeFactor converted(keys, table); - return converted; - } - - /* ************************************************************************* */ - DecisionTreeFactor SingleValue::operator*(const DecisionTreeFactor& f) const { - // TODO: can we do this more efficiently? - return toDecisionTreeFactor() * f; - } - - /* ************************************************************************* */ - bool SingleValue::ensureArcConsistency(size_t j, - vector& domains) const { - if (j != keys_[0]) throw invalid_argument( - "SingleValue check on wrong domain"); - Domain& D = domains[j]; - if (D.isSingleton()) { - if (D.firstValue() != value_) throw runtime_error("Unsatisfiable"); - return false; - } - D = Domain(discreteKey(),value_); - return true; - } - - /* ************************************************************************* */ - Constraint::shared_ptr SingleValue::partiallyApply(const Values& values) const { - Values::const_iterator it = values.find(keys_[0]); - if (it != values.end() && it->second != value_) throw runtime_error( - "SingleValue::partiallyApply: unsatisfiable"); - return boost::make_shared < SingleValue > (keys_[0], cardinality_, value_); - } - - /* ************************************************************************* */ - Constraint::shared_ptr SingleValue::partiallyApply( - const vector& domains) const { - const Domain& Dk = domains[keys_[0]]; - if (Dk.isSingleton() && !Dk.contains(value_)) throw runtime_error( - "SingleValue::partiallyApply: unsatisfiable"); - return boost::make_shared < SingleValue > (discreteKey(), value_); - } +using namespace std; /* ************************************************************************* */ -} // namespace gtsam +void SingleValue::print(const string& s, const KeyFormatter& formatter) const { + cout << s << "SingleValue on " + << "j=" << formatter(keys_[0]) << " with value " << value_ << endl; +} + +/* ************************************************************************* */ +double SingleValue::operator()(const Values& values) const { + return (double)(values.at(keys_[0]) == value_); +} + +/* ************************************************************************* */ +DecisionTreeFactor SingleValue::toDecisionTreeFactor() const { + DiscreteKeys keys; + keys += DiscreteKey(keys_[0], cardinality_); + vector table; + for (size_t i1 = 0; i1 < cardinality_; i1++) table.push_back(i1 == value_); + DecisionTreeFactor converted(keys, table); + return converted; +} + +/* ************************************************************************* */ +DecisionTreeFactor SingleValue::operator*(const DecisionTreeFactor& f) const { + // TODO: can we do this more efficiently? 
+ return toDecisionTreeFactor() * f; +} + +/* ************************************************************************* */ +bool SingleValue::ensureArcConsistency(Key j, Domains* domains) const { + if (j != keys_[0]) + throw invalid_argument("SingleValue check on wrong domain"); + Domain& D = domains->at(j); + if (D.isSingleton()) { + if (D.firstValue() != value_) throw runtime_error("Unsatisfiable"); + return false; + } + D = Domain(discreteKey(), value_); + return true; +} + +/* ************************************************************************* */ +Constraint::shared_ptr SingleValue::partiallyApply(const Values& values) const { + Values::const_iterator it = values.find(keys_[0]); + if (it != values.end() && it->second != value_) + throw runtime_error("SingleValue::partiallyApply: unsatisfiable"); + return boost::make_shared(keys_[0], cardinality_, value_); +} + +/* ************************************************************************* */ +Constraint::shared_ptr SingleValue::partiallyApply( + const Domains& domains) const { + const Domain& Dk = domains.at(keys_[0]); + if (Dk.isSingleton() && !Dk.contains(value_)) + throw runtime_error("SingleValue::partiallyApply: unsatisfiable"); + return boost::make_shared(discreteKey(), value_); +} + +/* ************************************************************************* */ +} // namespace gtsam diff --git a/gtsam_unstable/discrete/SingleValue.h b/gtsam_unstable/discrete/SingleValue.h index c4d2addec..d826093df 100644 --- a/gtsam_unstable/discrete/SingleValue.h +++ b/gtsam_unstable/discrete/SingleValue.h @@ -7,76 +7,71 @@ #pragma once -#include #include +#include namespace gtsam { - /** - * SingleValue constraint +/** + * SingleValue constraint: ensures a variable takes on a certain value. + * This could of course also be implemented by changing its `Domain`. + */ +class GTSAM_UNSTABLE_EXPORT SingleValue : public Constraint { + size_t cardinality_; /// < Number of values + size_t value_; ///< allowed value + + DiscreteKey discreteKey() const { + return DiscreteKey(keys_[0], cardinality_); + } + + public: + typedef boost::shared_ptr shared_ptr; + + /// Construct from key, cardinality, and given value. + SingleValue(Key key, size_t n, size_t value) + : Constraint(key), cardinality_(n), value_(value) {} + + /// Construct from DiscreteKey and given value. + SingleValue(const DiscreteKey& dkey, size_t value) + : Constraint(dkey.first), cardinality_(dkey.second), value_(value) {} + + // print + void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const override; + + /// equals + bool equals(const DiscreteFactor& other, double tol) const override { + if (!dynamic_cast(&other)) + return false; + else { + const SingleValue& f(static_cast(other)); + return (cardinality_ == f.cardinality_) && (value_ == f.value_); + } + } + + /// Calculate value + double operator()(const Values& values) const override; + + /// Convert into a decisiontree + DecisionTreeFactor toDecisionTreeFactor() const override; + + /// Multiply into a decisiontree + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; + + /* + * Ensure Arc-consistency: just sets domain[j] to {value_}. + * @param j domain to be checked + * @param (in/out) domains all domains, but only domains->at(j) will be checked. + * @return true if domains->at(j) was changed, false otherwise. 
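   * For example (illustrative; cf. TEST(CSP, SingleValue) later in this
   * diff): given SingleValue(AZ, 2) and a fresh three-value Domain for AZ,
   * the first call shrinks domains->at(AZ.first) to {2} and returns true; a
   * second call finds that singleton already in place and returns false.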
*/ - class GTSAM_UNSTABLE_EXPORT SingleValue: public Constraint { + bool ensureArcConsistency(Key j, Domains* domains) const override; - /// Number of values - size_t cardinality_; + /// Partially apply known values + Constraint::shared_ptr partiallyApply(const Values& values) const override; - /// allowed value - size_t value_; + /// Partially apply known values, domain version + Constraint::shared_ptr partiallyApply( + const Domains& domains) const override; +}; - DiscreteKey discreteKey() const { - return DiscreteKey(keys_[0],cardinality_); - } - - public: - - typedef boost::shared_ptr shared_ptr; - - /// Constructor - SingleValue(Key key, size_t n, size_t value) : - Constraint(key), cardinality_(n), value_(value) { - } - - /// Constructor - SingleValue(const DiscreteKey& dkey, size_t value) : - Constraint(dkey.first), cardinality_(dkey.second), value_(value) { - } - - // print - void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const override; - - /// equals - bool equals(const DiscreteFactor& other, double tol) const override { - if(!dynamic_cast(&other)) - return false; - else { - const SingleValue& f(static_cast(other)); - return (cardinality_==f.cardinality_) && (value_==f.value_); - } - } - - /// Calculate value - double operator()(const Values& values) const override; - - /// Convert into a decisiontree - DecisionTreeFactor toDecisionTreeFactor() const override; - - /// Multiply into a decisiontree - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; - - /* - * Ensure Arc-consistency - * @param j domain to be checked - * @param domains all other domains - */ - bool ensureArcConsistency(size_t j, std::vector& domains) const override; - - /// Partially apply known values - Constraint::shared_ptr partiallyApply(const Values& values) const override; - - /// Partially apply known values, domain version - Constraint::shared_ptr partiallyApply( - const std::vector& domains) const override; - }; - -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam_unstable/discrete/examples/schedulingExample.cpp b/gtsam_unstable/discrete/examples/schedulingExample.cpp index e9f63b2d8..3f270e45d 100644 --- a/gtsam_unstable/discrete/examples/schedulingExample.cpp +++ b/gtsam_unstable/discrete/examples/schedulingExample.cpp @@ -122,7 +122,7 @@ void runLargeExample() { // SETDEBUG("timing-verbose", true); SETDEBUG("DiscreteConditional::DiscreteConditional", true); gttic(large); - DiscreteFactor::sharedValues MPE = scheduler.optimalAssignment(); + auto MPE = scheduler.optimalAssignment(); gttoc(large); tictoc_finishedIteration(); tictoc_print(); @@ -225,7 +225,7 @@ void sampleSolutions() { // now, sample schedules for (size_t n = 0; n < 500; n++) { vector stats(19, 0); - vector samples; + vector samples; for (size_t i = 0; i < 7; i++) { samples.push_back(samplers[i]->sample()); schedulers[i].accumulateStats(samples[i], stats); @@ -331,7 +331,7 @@ void accomodateStudent() { // sample schedules for (size_t n = 0; n < 10; n++) { - Scheduler::sharedValues sample0 = chordal->sample(); + auto sample0 = chordal->sample(); scheduler.printAssignment(sample0); } } diff --git a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp index 1fc4a1459..5ed5766d5 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals12.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals12.cpp @@ -129,7 +129,7 @@ void runLargeExample() { tictoc_finishedIteration(); tictoc_print(); for (size_t 
i=0;i<100;i++) { - DiscreteFactor::sharedValues assignment = chordal->sample(); + auto assignment = chordal->sample(); vector stats(scheduler.nrFaculty()); scheduler.accumulateStats(assignment, stats); size_t max = *max_element(stats.begin(), stats.end()); @@ -143,7 +143,7 @@ void runLargeExample() { } #else gttic(large); - DiscreteFactor::sharedValues MPE = scheduler.optimalAssignment(); + auto MPE = scheduler.optimalAssignment(); gttoc(large); tictoc_finishedIteration(); tictoc_print(); @@ -234,7 +234,7 @@ void sampleSolutions() { // now, sample schedules for (size_t n = 0; n < 500; n++) { vector stats(19, 0); - vector samples; + vector samples; for (size_t i = 0; i < NRSTUDENTS; i++) { samples.push_back(samplers[i]->sample()); schedulers[i].accumulateStats(samples[i], stats); diff --git a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp index 95b64f289..2da0eb9b1 100644 --- a/gtsam_unstable/discrete/examples/schedulingQuals13.cpp +++ b/gtsam_unstable/discrete/examples/schedulingQuals13.cpp @@ -153,7 +153,7 @@ void runLargeExample() { tictoc_finishedIteration(); tictoc_print(); for (size_t i=0;i<100;i++) { - DiscreteFactor::sharedValues assignment = sample(*chordal); + auto assignment = sample(*chordal); vector stats(scheduler.nrFaculty()); scheduler.accumulateStats(assignment, stats); size_t max = *max_element(stats.begin(), stats.end()); @@ -167,7 +167,7 @@ void runLargeExample() { } #else gttic(large); - DiscreteFactor::sharedValues MPE = scheduler.optimalAssignment(); + auto MPE = scheduler.optimalAssignment(); gttoc(large); tictoc_finishedIteration(); tictoc_print(); @@ -259,7 +259,7 @@ void sampleSolutions() { // now, sample schedules for (size_t n = 0; n < 10000; n++) { vector stats(nrFaculty, 0); - vector samples; + vector samples; for (size_t i = 0; i < NRSTUDENTS; i++) { samples.push_back(samplers[i]->sample()); schedulers[i].accumulateStats(samples[i], stats); diff --git a/gtsam_unstable/discrete/tests/testCSP.cpp b/gtsam_unstable/discrete/tests/testCSP.cpp index 3dd493b1b..4a3b22561 100644 --- a/gtsam_unstable/discrete/tests/testCSP.cpp +++ b/gtsam_unstable/discrete/tests/testCSP.cpp @@ -7,49 +7,109 @@ #include #include + #include using boost::assign::insert; #include -#include + #include +#include using namespace std; using namespace gtsam; /* ************************************************************************* */ -TEST_UNSAFE( BinaryAllDif, allInOne) -{ - // Create keys and ordering +TEST(CSP, SingleValue) { + // Create keys for Idaho, Arizona, and Utah, allowing two colors for each: + size_t nrColors = 3; + DiscreteKey ID(0, nrColors), AZ(1, nrColors), UT(2, nrColors); + + // Check that a single value is equal to a decision stump with only one "1": + SingleValue singleValue(AZ, 2); + DecisionTreeFactor f1(AZ, "0 0 1"); + EXPECT(assert_equal(f1, singleValue.toDecisionTreeFactor())); + + // Create domains + Domains domains; + domains.emplace(0, Domain(ID)); + domains.emplace(1, Domain(AZ)); + domains.emplace(2, Domain(UT)); + + // Ensure arc-consistency: just wipes out values in AZ domain: + EXPECT(singleValue.ensureArcConsistency(1, &domains)); + LONGS_EQUAL(3, domains.at(0).nrValues()); + LONGS_EQUAL(1, domains.at(1).nrValues()); + LONGS_EQUAL(3, domains.at(2).nrValues()); +} + +/* ************************************************************************* */ +TEST(CSP, BinaryAllDif) { + // Create keys for Idaho, Arizona, and Utah, allowing 2 colors for each: size_t nrColors = 2; -// DiscreteKey ID("Idaho", 
nrColors), UT("Utah", nrColors), AZ("Arizona", nrColors); - DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); + DiscreteKey ID(0, nrColors), AZ(1, nrColors), UT(2, nrColors); // Check construction and conversion BinaryAllDiff c1(ID, UT); DecisionTreeFactor f1(ID & UT, "0 1 1 0"); - EXPECT(assert_equal(f1,c1.toDecisionTreeFactor())); + EXPECT(assert_equal(f1, c1.toDecisionTreeFactor())); // Check construction and conversion BinaryAllDiff c2(UT, AZ); DecisionTreeFactor f2(UT & AZ, "0 1 1 0"); - EXPECT(assert_equal(f2,c2.toDecisionTreeFactor())); + EXPECT(assert_equal(f2, c2.toDecisionTreeFactor())); - DecisionTreeFactor f3 = f1*f2; - EXPECT(assert_equal(f3,c1*f2)); - EXPECT(assert_equal(f3,c2*f1)); + // Check multiplication of factors with constraint: + DecisionTreeFactor f3 = f1 * f2; + EXPECT(assert_equal(f3, c1 * f2)); + EXPECT(assert_equal(f3, c2 * f1)); } /* ************************************************************************* */ -TEST_UNSAFE( CSP, allInOne) -{ - // Create keys and ordering +TEST(CSP, AllDiff) { + // Create keys for Idaho, Arizona, and Utah, allowing two colors for each: + size_t nrColors = 3; + DiscreteKey ID(0, nrColors), AZ(1, nrColors), UT(2, nrColors); + + // Check construction and conversion + vector dkeys{ID, UT, AZ}; + AllDiff alldiff(dkeys); + DecisionTreeFactor actual = alldiff.toDecisionTreeFactor(); + // GTSAM_PRINT(actual); + actual.dot("actual"); + DecisionTreeFactor f2( + ID & AZ & UT, + "0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0"); + EXPECT(assert_equal(f2, actual)); + + // Create domains. + Domains domains; + domains.emplace(0, Domain(ID)); + domains.emplace(1, Domain(AZ)); + domains.emplace(2, Domain(UT)); + + // First constrict AZ domain: + SingleValue singleValue(AZ, 2); + EXPECT(singleValue.ensureArcConsistency(1, &domains)); + + // Arc-consistency + EXPECT(alldiff.ensureArcConsistency(0, &domains)); + EXPECT(!alldiff.ensureArcConsistency(1, &domains)); + EXPECT(alldiff.ensureArcConsistency(2, &domains)); + LONGS_EQUAL(2, domains.at(0).nrValues()); + LONGS_EQUAL(1, domains.at(1).nrValues()); + LONGS_EQUAL(2, domains.at(2).nrValues()); +} + +/* ************************************************************************* */ +TEST(CSP, allInOne) { + // Create keys for Idaho, Arizona, and Utah, allowing 3 colors for each: size_t nrColors = 2; - DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); + DiscreteKey ID(0, nrColors), AZ(1, nrColors), UT(2, nrColors); // Create the CSP CSP csp; - csp.addAllDiff(ID,UT); - csp.addAllDiff(UT,AZ); + csp.addAllDiff(ID, UT); + csp.addAllDiff(UT, AZ); // Check an invalid combination, with ID==UT==AZ all same color DiscreteFactor::Values invalid; @@ -69,68 +129,66 @@ TEST_UNSAFE( CSP, allInOne) DecisionTreeFactor product = csp.product(); // product.dot("product"); DecisionTreeFactor expectedProduct(ID & AZ & UT, "0 1 0 0 0 0 1 0"); - EXPECT(assert_equal(expectedProduct,product)); + EXPECT(assert_equal(expectedProduct, product)); // Solve - CSP::sharedValues mpe = csp.optimalAssignment(); + auto mpe = csp.optimalAssignment(); CSP::Values expected; insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1); - EXPECT(assert_equal(expected,*mpe)); - EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); + EXPECT(assert_equal(expected, mpe)); + EXPECT_DOUBLES_EQUAL(1, csp(mpe), 1e-9); } /* ************************************************************************* */ -TEST_UNSAFE( CSP, WesternUS) -{ - // Create keys +TEST(CSP, WesternUS) { + // Create keys for all states in Western US, with 4 color 
possibilities. size_t nrColors = 4; - DiscreteKey - // Create ordering according to example in ND-CSP.lyx - WA(0, nrColors), OR(3, nrColors), CA(1, nrColors),NV(2, nrColors), - ID(8, nrColors), UT(9, nrColors), AZ(10, nrColors), - MT(4, nrColors), WY(5, nrColors), CO(7, nrColors), NM(6, nrColors); + DiscreteKey WA(0, nrColors), OR(3, nrColors), CA(1, nrColors), + NV(2, nrColors), ID(8, nrColors), UT(9, nrColors), AZ(10, nrColors), + MT(4, nrColors), WY(5, nrColors), CO(7, nrColors), NM(6, nrColors); // Create the CSP CSP csp; - csp.addAllDiff(WA,ID); - csp.addAllDiff(WA,OR); - csp.addAllDiff(OR,ID); - csp.addAllDiff(OR,CA); - csp.addAllDiff(OR,NV); - csp.addAllDiff(CA,NV); - csp.addAllDiff(CA,AZ); - csp.addAllDiff(ID,MT); - csp.addAllDiff(ID,WY); - csp.addAllDiff(ID,UT); - csp.addAllDiff(ID,NV); - csp.addAllDiff(NV,UT); - csp.addAllDiff(NV,AZ); - csp.addAllDiff(UT,WY); - csp.addAllDiff(UT,CO); - csp.addAllDiff(UT,NM); - csp.addAllDiff(UT,AZ); - csp.addAllDiff(AZ,CO); - csp.addAllDiff(AZ,NM); - csp.addAllDiff(MT,WY); - csp.addAllDiff(WY,CO); - csp.addAllDiff(CO,NM); + csp.addAllDiff(WA, ID); + csp.addAllDiff(WA, OR); + csp.addAllDiff(OR, ID); + csp.addAllDiff(OR, CA); + csp.addAllDiff(OR, NV); + csp.addAllDiff(CA, NV); + csp.addAllDiff(CA, AZ); + csp.addAllDiff(ID, MT); + csp.addAllDiff(ID, WY); + csp.addAllDiff(ID, UT); + csp.addAllDiff(ID, NV); + csp.addAllDiff(NV, UT); + csp.addAllDiff(NV, AZ); + csp.addAllDiff(UT, WY); + csp.addAllDiff(UT, CO); + csp.addAllDiff(UT, NM); + csp.addAllDiff(UT, AZ); + csp.addAllDiff(AZ, CO); + csp.addAllDiff(AZ, NM); + csp.addAllDiff(MT, WY); + csp.addAllDiff(WY, CO); + csp.addAllDiff(CO, NM); - // Solve + // Create ordering according to example in ND-CSP.lyx Ordering ordering; - ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7),Key(8),Key(9),Key(10); - CSP::sharedValues mpe = csp.optimalAssignment(ordering); - // GTSAM_PRINT(*mpe); + ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7), + Key(8), Key(9), Key(10); + // Solve using that ordering: + auto mpe = csp.optimalAssignment(ordering); + // GTSAM_PRINT(mpe); CSP::Values expected; - insert(expected) - (WA.first,1)(CA.first,1)(NV.first,3)(OR.first,0) - (MT.first,1)(WY.first,0)(NM.first,3)(CO.first,2) - (ID.first,2)(UT.first,1)(AZ.first,0); + insert(expected)(WA.first, 1)(CA.first, 1)(NV.first, 3)(OR.first, 0)( + MT.first, 1)(WY.first, 0)(NM.first, 3)(CO.first, 2)(ID.first, 2)( + UT.first, 1)(AZ.first, 0); // TODO: Fix me! mpe result seems to be right. (See the printing) // It has the same prob as the expected solution. // Is mpe another solution, or the expected solution is unique??? - EXPECT(assert_equal(expected,*mpe)); - EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); + EXPECT(assert_equal(expected, mpe)); + EXPECT_DOUBLES_EQUAL(1, csp(mpe), 1e-9); // Write out the dual graph for hmetis #ifdef DUAL @@ -142,33 +200,17 @@ TEST_UNSAFE( CSP, WesternUS) } /* ************************************************************************* */ -TEST_UNSAFE( CSP, AllDiff) -{ - // Create keys and ordering +TEST(CSP, ArcConsistency) { + // Create keys for Idaho, Arizona, and Utah, allowing three colors for each: size_t nrColors = 3; - DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); + DiscreteKey ID(0, nrColors), AZ(1, nrColors), UT(2, nrColors); - // Create the CSP + // Create the CSP using just one all-diff constraint, plus constrain Arizona. 
CSP csp; - vector dkeys; - dkeys += ID,UT,AZ; + vector dkeys{ID, UT, AZ}; csp.addAllDiff(dkeys); - csp.addSingleValue(AZ,2); -// GTSAM_PRINT(csp); - - // Check construction and conversion - SingleValue s(AZ,2); - DecisionTreeFactor f1(AZ,"0 0 1"); - EXPECT(assert_equal(f1,s.toDecisionTreeFactor())); - - // Check construction and conversion - AllDiff alldiff(dkeys); - DecisionTreeFactor actual = alldiff.toDecisionTreeFactor(); -// GTSAM_PRINT(actual); -// actual.dot("actual"); - DecisionTreeFactor f2(ID & AZ & UT, - "0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0"); - EXPECT(assert_equal(f2,actual)); + csp.addSingleValue(AZ, 2); + // GTSAM_PRINT(csp); // Check an invalid combination, with ID==UT==AZ all same color DiscreteFactor::Values invalid; @@ -185,42 +227,47 @@ TEST_UNSAFE( CSP, AllDiff) EXPECT_DOUBLES_EQUAL(1, csp(valid), 1e-9); // Solve - CSP::sharedValues mpe = csp.optimalAssignment(); + auto mpe = csp.optimalAssignment(); CSP::Values expected; insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2); - EXPECT(assert_equal(expected,*mpe)); - EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); + EXPECT(assert_equal(expected, mpe)); + EXPECT_DOUBLES_EQUAL(1, csp(mpe), 1e-9); - // Arc-consistency - vector domains; - domains += Domain(ID), Domain(AZ), Domain(UT); - SingleValue singleValue(AZ,2); - EXPECT(singleValue.ensureArcConsistency(1,domains)); - EXPECT(alldiff.ensureArcConsistency(0,domains)); - EXPECT(!alldiff.ensureArcConsistency(1,domains)); - EXPECT(alldiff.ensureArcConsistency(2,domains)); - LONGS_EQUAL(2,domains[0].nrValues()); - LONGS_EQUAL(1,domains[1].nrValues()); - LONGS_EQUAL(2,domains[2].nrValues()); + // ensure arc-consistency, i.e., narrow domains... + Domains domains; + domains.emplace(0, Domain(ID)); + domains.emplace(1, Domain(AZ)); + domains.emplace(2, Domain(UT)); + + SingleValue singleValue(AZ, 2); + AllDiff alldiff(dkeys); + EXPECT(singleValue.ensureArcConsistency(1, &domains)); + EXPECT(alldiff.ensureArcConsistency(0, &domains)); + EXPECT(!alldiff.ensureArcConsistency(1, &domains)); + EXPECT(alldiff.ensureArcConsistency(2, &domains)); + LONGS_EQUAL(2, domains.at(0).nrValues()); + LONGS_EQUAL(1, domains.at(1).nrValues()); + LONGS_EQUAL(2, domains.at(2).nrValues()); // Parial application, version 1 DiscreteFactor::Values known; known[AZ.first] = 2; DiscreteFactor::shared_ptr reduced1 = alldiff.partiallyApply(known); DecisionTreeFactor f3(ID & UT, "0 1 1 1 0 1 1 1 0"); - EXPECT(assert_equal(f3,reduced1->toDecisionTreeFactor())); + EXPECT(assert_equal(f3, reduced1->toDecisionTreeFactor())); DiscreteFactor::shared_ptr reduced2 = singleValue.partiallyApply(known); DecisionTreeFactor f4(AZ, "0 0 1"); - EXPECT(assert_equal(f4,reduced2->toDecisionTreeFactor())); + EXPECT(assert_equal(f4, reduced2->toDecisionTreeFactor())); // Parial application, version 2 DiscreteFactor::shared_ptr reduced3 = alldiff.partiallyApply(domains); - EXPECT(assert_equal(f3,reduced3->toDecisionTreeFactor())); + EXPECT(assert_equal(f3, reduced3->toDecisionTreeFactor())); DiscreteFactor::shared_ptr reduced4 = singleValue.partiallyApply(domains); - EXPECT(assert_equal(f4,reduced4->toDecisionTreeFactor())); + EXPECT(assert_equal(f4, reduced4->toDecisionTreeFactor())); // full arc-consistency test csp.runArcConsistency(nrColors); + // GTSAM_PRINT(csp); } /* ************************************************************************* */ @@ -229,4 +276,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git 
a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp index 9929938d5..c48d7639d 100644 --- a/gtsam_unstable/discrete/tests/testLoopyBelief.cpp +++ b/gtsam_unstable/discrete/tests/testLoopyBelief.cpp @@ -5,14 +5,15 @@ * @date Oct 11, 2013 */ -#include +#include #include #include -#include -#include +#include + #include -#include +#include #include +#include using namespace std; using namespace boost; @@ -23,11 +24,12 @@ using namespace gtsam; * Loopy belief solver for graphs with only binary and unary factors */ class LoopyBelief { - /** Star graph struct for each node, containing * - the star graph itself - * - the product of original unary factors so we don't have to recompute it later, and - * - the factor indices of the corrected belief factors of the neighboring nodes + * - the product of original unary factors so we don't have to recompute it + * later, and + * - the factor indices of the corrected belief factors of the neighboring + * nodes */ typedef std::map CorrectedBeliefIndices; struct StarGraph { @@ -36,41 +38,41 @@ class LoopyBelief { DecisionTreeFactor::shared_ptr unary; VariableIndex varIndex_; StarGraph(const DiscreteFactorGraph::shared_ptr& _star, - const CorrectedBeliefIndices& _beliefIndices, - const DecisionTreeFactor::shared_ptr& _unary) : - star(_star), correctedBeliefIndices(_beliefIndices), unary(_unary), varIndex_( - *_star) { - } + const CorrectedBeliefIndices& _beliefIndices, + const DecisionTreeFactor::shared_ptr& _unary) + : star(_star), + correctedBeliefIndices(_beliefIndices), + unary(_unary), + varIndex_(*_star) {} void print(const std::string& s = "") const { cout << s << ":" << endl; star->print("Star graph: "); - for(Key key: correctedBeliefIndices | boost::adaptors::map_keys) { + for (Key key : correctedBeliefIndices | boost::adaptors::map_keys) { cout << "Belief factor index for " << key << ": " - << correctedBeliefIndices.at(key) << endl; + << correctedBeliefIndices.at(key) << endl; } - if (unary) - unary->print("Unary: "); + if (unary) unary->print("Unary: "); } }; typedef std::map StarGraphs; - StarGraphs starGraphs_; ///< star graph at each variable + StarGraphs starGraphs_; ///< star graph at each variable -public: + public: /** Constructor - * Need all discrete keys to access node's cardinality for creating belief factors + * Need all discrete keys to access node's cardinality for creating belief + * factors * TODO: so troublesome!! 
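   * Usage sketch (illustrative; variable names mirror the test at the bottom
   * of this file):
   *
   *   LoopyBelief solver(graph, allKeys);
   *   auto beliefs = solver.iterate(allKeys);  // one round of belief updates
   *   beliefs->print();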
*/ LoopyBelief(const DiscreteFactorGraph& graph, - const std::map& allDiscreteKeys) : - starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) { - } + const std::map& allDiscreteKeys) + : starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) {} /// print void print(const std::string& s = "") const { cout << s << ":" << endl; - for(Key key: starGraphs_ | boost::adaptors::map_keys) { + for (Key key : starGraphs_ | boost::adaptors::map_keys) { starGraphs_.at(key).print((boost::format("Node %d:") % key).str()); } } @@ -79,12 +81,13 @@ public: DiscreteFactorGraph::shared_ptr iterate( const std::map& allDiscreteKeys) { static const bool debug = false; - static DiscreteConditional::shared_ptr dummyCond; // unused by-product of elimination + static DiscreteConditional::shared_ptr + dummyCond; // unused by-product of elimination DiscreteFactorGraph::shared_ptr beliefs(new DiscreteFactorGraph()); std::map > allMessages; // Eliminate each star graph - for(Key key: starGraphs_ | boost::adaptors::map_keys) { -// cout << "***** Node " << key << "*****" << endl; + for (Key key : starGraphs_ | boost::adaptors::map_keys) { + // cout << "***** Node " << key << "*****" << endl; // initialize belief to the unary factor from the original graph DecisionTreeFactor::shared_ptr beliefAtKey; @@ -92,15 +95,16 @@ public: std::map messages; // eliminate each neighbor in this star graph one by one - for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) { + for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices | + boost::adaptors::map_keys) { DiscreteFactorGraph subGraph; - for(size_t factor: starGraphs_.at(key).varIndex_[neighbor]) { + for (size_t factor : starGraphs_.at(key).varIndex_[neighbor]) { subGraph.push_back(starGraphs_.at(key).star->at(factor)); } if (debug) subGraph.print("------- Subgraph:"); DiscreteFactor::shared_ptr message; - boost::tie(dummyCond, message) = EliminateDiscrete(subGraph, - Ordering(list_of(neighbor))); + boost::tie(dummyCond, message) = + EliminateDiscrete(subGraph, Ordering(list_of(neighbor))); // store the new factor into messages messages.insert(make_pair(neighbor, message)); if (debug) message->print("------- Message: "); @@ -108,14 +112,12 @@ public: // Belief is the product of all messages and the unary factor // Incorporate new the factor to belief if (!beliefAtKey) - beliefAtKey = boost::dynamic_pointer_cast( - message); - else beliefAtKey = - boost::make_shared( - (*beliefAtKey) - * (*boost::dynamic_pointer_cast( - message))); + boost::dynamic_pointer_cast(message); + else + beliefAtKey = boost::make_shared( + (*beliefAtKey) * + (*boost::dynamic_pointer_cast(message))); } if (starGraphs_.at(key).unary) beliefAtKey = boost::make_shared( @@ -133,7 +135,8 @@ public: sumFactorTable = (boost::format("%s %f") % sumFactorTable % sum).str(); DecisionTreeFactor sumFactor(allDiscreteKeys.at(key), sumFactorTable); if (debug) sumFactor.print("denomFactor: "); - beliefAtKey = boost::make_shared((*beliefAtKey) / sumFactor); + beliefAtKey = + boost::make_shared((*beliefAtKey) / sumFactor); if (debug) beliefAtKey->print("New belief at key normalized: "); beliefs->push_back(beliefAtKey); allMessages[key] = messages; @@ -141,17 +144,20 @@ public: // Update corrected beliefs VariableIndex beliefFactors(*beliefs); - for(Key key: starGraphs_ | boost::adaptors::map_keys) { + for (Key key : starGraphs_ | boost::adaptors::map_keys) { std::map messages = allMessages[key]; - for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) 
{ - DecisionTreeFactor correctedBelief = (*boost::dynamic_pointer_cast< - DecisionTreeFactor>(beliefs->at(beliefFactors[key].front()))) - / (*boost::dynamic_pointer_cast( + for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices | + boost::adaptors::map_keys) { + DecisionTreeFactor correctedBelief = + (*boost::dynamic_pointer_cast( + beliefs->at(beliefFactors[key].front()))) / + (*boost::dynamic_pointer_cast( messages.at(neighbor))); if (debug) correctedBelief.print("correctedBelief: "); - size_t beliefIndex = starGraphs_.at(neighbor).correctedBeliefIndices.at( - key); - starGraphs_.at(neighbor).star->replace(beliefIndex, + size_t beliefIndex = + starGraphs_.at(neighbor).correctedBeliefIndices.at(key); + starGraphs_.at(neighbor).star->replace( + beliefIndex, boost::make_shared(correctedBelief)); } } @@ -161,21 +167,22 @@ public: return beliefs; } -private: + private: /** * Build star graphs for each node. */ - StarGraphs buildStarGraphs(const DiscreteFactorGraph& graph, + StarGraphs buildStarGraphs( + const DiscreteFactorGraph& graph, const std::map& allDiscreteKeys) const { StarGraphs starGraphs; - VariableIndex varIndex(graph); ///< access to all factors of each node - for(Key key: varIndex | boost::adaptors::map_keys) { + VariableIndex varIndex(graph); ///< access to all factors of each node + for (Key key : varIndex | boost::adaptors::map_keys) { // initialize to multiply with other unary factors later DecisionTreeFactor::shared_ptr prodOfUnaries; // collect all factors involving this key in the original graph DiscreteFactorGraph::shared_ptr star(new DiscreteFactorGraph()); - for(size_t factorIndex: varIndex[key]) { + for (size_t factorIndex : varIndex[key]) { star->push_back(graph.at(factorIndex)); // accumulate unary factors @@ -185,9 +192,9 @@ private: graph.at(factorIndex)); else prodOfUnaries = boost::make_shared( - *prodOfUnaries - * (*boost::dynamic_pointer_cast( - graph.at(factorIndex)))); + *prodOfUnaries * + (*boost::dynamic_pointer_cast( + graph.at(factorIndex)))); } } @@ -196,7 +203,7 @@ private: KeySet neighbors = star->keys(); neighbors.erase(key); CorrectedBeliefIndices correctedBeliefIndices; - for(Key neighbor: neighbors) { + for (Key neighbor : neighbors) { // TODO: default table for keys with more than 2 values? 
string initialBelief; for (size_t v = 0; v < allDiscreteKeys.at(neighbor).second - 1; ++v) { @@ -207,9 +214,8 @@ private: DecisionTreeFactor(allDiscreteKeys.at(neighbor), initialBelief)); correctedBeliefIndices.insert(make_pair(neighbor, star->size() - 1)); } - starGraphs.insert( - make_pair(key, - StarGraph(star, correctedBeliefIndices, prodOfUnaries))); + starGraphs.insert(make_pair( + key, StarGraph(star, correctedBeliefIndices, prodOfUnaries))); } return starGraphs; } @@ -249,7 +255,6 @@ TEST_UNSAFE(LoopyBelief, construction) { DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys); beliefs->print(); } - } /* ************************************************************************* */ diff --git a/gtsam_unstable/discrete/tests/testScheduler.cpp b/gtsam_unstable/discrete/tests/testScheduler.cpp index 3f6c6a1e0..7822cbd38 100644 --- a/gtsam_unstable/discrete/tests/testScheduler.cpp +++ b/gtsam_unstable/discrete/tests/testScheduler.cpp @@ -5,14 +5,13 @@ */ //#define ENABLE_TIMING -#include +#include #include #include +#include -#include - -#include #include +#include #include using namespace boost::assign; @@ -22,7 +21,6 @@ using namespace gtsam; /* ************************************************************************* */ // Create the expected graph of constraints DiscreteFactorGraph createExpected() { - // Start building size_t nrFaculty = 4, nrTimeSlots = 3; @@ -47,27 +45,27 @@ DiscreteFactorGraph createExpected() { string available = "1 1 1 0 1 1 1 1 0 1 1 1"; // Akansel - expected.add(A1, faculty_in_A); // Area 1 - expected.add(A1, "1 1 1 0"); // Advisor + expected.add(A1, faculty_in_A); // Area 1 + expected.add(A1, "1 1 1 0"); // Advisor expected.add(A & A1, available); - expected.add(A2, faculty_in_M); // Area 2 - expected.add(A2, "1 1 1 0"); // Advisor + expected.add(A2, faculty_in_M); // Area 2 + expected.add(A2, "1 1 1 0"); // Advisor expected.add(A & A2, available); - expected.add(A3, faculty_in_P); // Area 3 - expected.add(A3, "1 1 1 0"); // Advisor + expected.add(A3, faculty_in_P); // Area 3 + expected.add(A3, "1 1 1 0"); // Advisor expected.add(A & A3, available); // Mutual exclusion for faculty expected.addAllDiff(A1 & A2 & A3); // Jake - expected.add(J1, faculty_in_H); // Area 1 - expected.add(J1, "1 0 1 1"); // Advisor + expected.add(J1, faculty_in_H); // Area 1 + expected.add(J1, "1 0 1 1"); // Advisor expected.add(J & J1, available); - expected.add(J2, faculty_in_C); // Area 2 - expected.add(J2, "1 0 1 1"); // Advisor + expected.add(J2, faculty_in_C); // Area 2 + expected.add(J2, "1 0 1 1"); // Advisor expected.add(J & J2, available); - expected.add(J3, faculty_in_A); // Area 3 - expected.add(J3, "1 0 1 1"); // Advisor + expected.add(J3, faculty_in_A); // Area 3 + expected.add(J3, "1 0 1 1"); // Advisor expected.add(J & J3, available); // Mutual exclusion for faculty expected.addAllDiff(J1 & J2 & J3); @@ -79,8 +77,7 @@ DiscreteFactorGraph createExpected() { } /* ************************************************************************* */ -TEST( schedulingExample, test) -{ +TEST(schedulingExample, test) { Scheduler s(2); // add faculty @@ -121,33 +118,32 @@ TEST( schedulingExample, test) // Do brute force product and output that to file DecisionTreeFactor product = s.product(); - //product.dot("scheduling", false); + // product.dot("scheduling", false); // Do exact inference gttic(small); - DiscreteFactor::sharedValues MPE = s.optimalAssignment(); + auto MPE = s.optimalAssignment(); gttoc(small); // print MPE, commented out as unit tests don't print -// 
s.printAssignment(MPE); + // s.printAssignment(MPE); // Commented out as does not work yet // s.runArcConsistency(8,10,true); // find the assignment of students to slots with most possible committees // Commented out as not implemented yet -// sharedValues bestSchedule = s.bestSchedule(); -// GTSAM_PRINT(*bestSchedule); + // auto bestSchedule = s.bestSchedule(); + // GTSAM_PRINT(bestSchedule); // find the corresponding most desirable committee assignment // Commented out as not implemented yet -// sharedValues bestAssignment = s.bestAssignment(bestSchedule); -// GTSAM_PRINT(*bestAssignment); + // auto bestAssignment = s.bestAssignment(bestSchedule); + // GTSAM_PRINT(bestAssignment); } /* ************************************************************************* */ -TEST( schedulingExample, smallFromFile) -{ +TEST(schedulingExample, smallFromFile) { string path(TOPSRCDIR "/gtsam_unstable/discrete/examples/"); Scheduler s(2, path + "small.csv"); @@ -179,4 +175,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam_unstable/discrete/tests/testSudoku.cpp b/gtsam_unstable/discrete/tests/testSudoku.cpp index e2115e8bc..808f98b1c 100644 --- a/gtsam_unstable/discrete/tests/testSudoku.cpp +++ b/gtsam_unstable/discrete/tests/testSudoku.cpp @@ -5,74 +5,69 @@ * @author Frank Dellaert */ -#include #include +#include +#include + #include using boost::assign::insert; +#include + #include #include -#include using namespace std; using namespace gtsam; #define PRINT false -class Sudoku: public CSP { +/// A class that encodes Sudoku's as a CSP problem +class Sudoku : public CSP { + size_t n_; ///< Side of Sudoku, e.g. 4 or 9 - /// sudoku size - size_t n_; - - /// discrete keys - typedef std::pair IJ; + /// Mapping from base i,j coordinates to discrete keys: + using IJ = std::pair; std::map dkeys_; -public: - + public: /// return DiscreteKey for cell(i,j) const DiscreteKey& dkey(size_t i, size_t j) const { return dkeys_.at(IJ(i, j)); } /// return Key for cell(i,j) - Key key(size_t i, size_t j) const { - return dkey(i, j).first; - } + Key key(size_t i, size_t j) const { return dkey(i, j).first; } /// Constructor - Sudoku(size_t n, ...) : - n_(n) { + Sudoku(size_t n, ...) 
: n_(n) { // Create variables, ordering, and unary constraints va_list ap; va_start(ap, n); - Key k=0; for (size_t i = 0; i < n; ++i) { - for (size_t j = 0; j < n; ++j, ++k) { + for (size_t j = 0; j < n; ++j) { // create the key IJ ij(i, j); - dkeys_[ij] = DiscreteKey(k, n); + Symbol key('1' + i, j + 1); + dkeys_[ij] = DiscreteKey(key, n); // get the unary constraint, if any int value = va_arg(ap, int); - // cout << value << " "; if (value != 0) addSingleValue(dkeys_[ij], value - 1); } - //cout << endl; + // cout << endl; } va_end(ap); // add row constraints for (size_t i = 0; i < n; i++) { DiscreteKeys dkeys; - for (size_t j = 0; j < n; j++) - dkeys += dkey(i, j); + for (size_t j = 0; j < n; j++) dkeys += dkey(i, j); addAllDiff(dkeys); } // add col constraints for (size_t j = 0; j < n; j++) { DiscreteKeys dkeys; - for (size_t i = 0; i < n; i++) - dkeys += dkey(i, j); + for (size_t i = 0; i < n; i++) dkeys += dkey(i, j); addAllDiff(dkeys); } @@ -84,8 +79,7 @@ public: // Box I,J DiscreteKeys dkeys; for (size_t i = i0; i < i0 + N; i++) - for (size_t j = j0; j < j0 + N; j++) - dkeys += dkey(i, j); + for (size_t j = j0; j < j0 + N; j++) dkeys += dkey(i, j); addAllDiff(dkeys); j0 += N; } @@ -94,120 +88,171 @@ public: } /// Print readable form of assignment - void printAssignment(DiscreteFactor::sharedValues assignment) const { + void printAssignment(const DiscreteFactor::Values& assignment) const { for (size_t i = 0; i < n_; i++) { for (size_t j = 0; j < n_; j++) { Key k = key(i, j); - cout << 1 + assignment->at(k) << " "; + cout << 1 + assignment.at(k) << " "; } cout << endl; } } /// solve and print solution - void printSolution() { - DiscreteFactor::sharedValues MPE = optimalAssignment(); + void printSolution() const { + auto MPE = optimalAssignment(); printAssignment(MPE); } + // Print domain + void printDomains(const Domains& domains) { + for (size_t i = 0; i < n_; i++) { + for (size_t j = 0; j < n_; j++) { + Key k = key(i, j); + cout << domains.at(k).base1Str(); + cout << "\t"; + } // i + cout << endl; + } // j + } }; /* ************************************************************************* */ -TEST_UNSAFE( Sudoku, small) -{ - Sudoku csp(4, - 1,0, 0,4, - 0,0, 0,0, - - 4,0, 2,0, - 0,1, 0,0); - - // Do BP - csp.runArcConsistency(4,10,PRINT); +TEST(Sudoku, small) { + Sudoku csp(4, // + 1, 0, 0, 4, // + 0, 0, 0, 0, // + 4, 0, 2, 0, // + 0, 1, 0, 0); // optimize and check - CSP::sharedValues solution = csp.optimalAssignment(); + auto solution = csp.optimalAssignment(); CSP::Values expected; - insert(expected) - (csp.key(0,0), 0)(csp.key(0,1), 1)(csp.key(0,2), 2)(csp.key(0,3), 3) - (csp.key(1,0), 2)(csp.key(1,1), 3)(csp.key(1,2), 0)(csp.key(1,3), 1) - (csp.key(2,0), 3)(csp.key(2,1), 2)(csp.key(2,2), 1)(csp.key(2,3), 0) - (csp.key(3,0), 1)(csp.key(3,1), 0)(csp.key(3,2), 3)(csp.key(3,3), 2); - EXPECT(assert_equal(expected,*solution)); - //csp.printAssignment(solution); + insert(expected)(csp.key(0, 0), 0)(csp.key(0, 1), 1)(csp.key(0, 2), 2)( + csp.key(0, 3), 3)(csp.key(1, 0), 2)(csp.key(1, 1), 3)(csp.key(1, 2), 0)( + csp.key(1, 3), 1)(csp.key(2, 0), 3)(csp.key(2, 1), 2)(csp.key(2, 2), 1)( + csp.key(2, 3), 0)(csp.key(3, 0), 1)(csp.key(3, 1), 0)(csp.key(3, 2), 3)( + csp.key(3, 3), 2); + EXPECT(assert_equal(expected, solution)); + // csp.printAssignment(solution); + + // Do BP (AC1) + auto domains = csp.runArcConsistency(4, 3); + // csp.printDomains(domains); + Domain domain44 = domains.at(Symbol('4', 4)); + EXPECT_LONGS_EQUAL(1, domain44.nrValues()); + + // Test Creation of a new, simpler CSP + 
CSP new_csp = csp.partiallyApply(domains); + // Should only be 16 new Domains + EXPECT_LONGS_EQUAL(16, new_csp.size()); + + // Check that solution + auto new_solution = new_csp.optimalAssignment(); + // csp.printAssignment(new_solution); + EXPECT(assert_equal(expected, new_solution)); } /* ************************************************************************* */ -TEST_UNSAFE( Sudoku, easy) -{ - Sudoku sudoku(9, - 0,0,5, 0,9,0, 0,0,1, - 0,0,0, 0,0,2, 0,7,3, - 7,6,0, 0,0,8, 2,0,0, +TEST(Sudoku, easy) { + Sudoku csp(9, // + 0, 0, 5, 0, 9, 0, 0, 0, 1, // + 0, 0, 0, 0, 0, 2, 0, 7, 3, // + 7, 6, 0, 0, 0, 8, 2, 0, 0, // - 0,1,2, 0,0,9, 0,0,4, - 0,0,0, 2,0,3, 0,0,0, - 3,0,0, 1,0,0, 9,6,0, + 0, 1, 2, 0, 0, 9, 0, 0, 4, // + 0, 0, 0, 2, 0, 3, 0, 0, 0, // + 3, 0, 0, 1, 0, 0, 9, 6, 0, // - 0,0,1, 9,0,0, 0,5,8, - 9,7,0, 5,0,0, 0,0,0, - 5,0,0, 0,3,0, 7,0,0); + 0, 0, 1, 9, 0, 0, 0, 5, 8, // + 9, 7, 0, 5, 0, 0, 0, 0, 0, // + 5, 0, 0, 0, 3, 0, 7, 0, 0); - // Do BP - sudoku.runArcConsistency(4,10,PRINT); + // csp.printSolution(); // don't do it - // sudoku.printSolution(); // don't do it + // Do BP (AC1) + auto domains = csp.runArcConsistency(9, 10); + // csp.printDomains(domains); + Key key99 = Symbol('9', 9); + Domain domain99 = domains.at(key99); + EXPECT_LONGS_EQUAL(1, domain99.nrValues()); + + // Test Creation of a new, simpler CSP + CSP new_csp = csp.partiallyApply(domains); + // 81 new Domains, and still 26 all-diff constraints + EXPECT_LONGS_EQUAL(81 + 26, new_csp.size()); + + // csp.printSolution(); // still don't do it ! :-( } /* ************************************************************************* */ -TEST_UNSAFE( Sudoku, extreme) -{ - Sudoku sudoku(9, - 0,0,9, 7,4,8, 0,0,0, - 7,0,0, 0,0,0, 0,0,0, - 0,2,0, 1,0,9, 0,0,0, - - 0,0,7, 0,0,0, 2,4,0, - 0,6,4, 0,1,0, 5,9,0, - 0,9,8, 0,0,0, 3,0,0, - - 0,0,0, 8,0,3, 0,2,0, - 0,0,0, 0,0,0, 0,0,6, - 0,0,0, 2,7,5, 9,0,0); +TEST(Sudoku, extreme) { + Sudoku csp(9, // + 0, 0, 9, 7, 4, 8, 0, 0, 0, 7, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, // + 0, 1, 0, 9, 0, 0, 0, 0, 0, 7, // + 0, 0, 0, 2, 4, 0, 0, 6, 4, 0, // + 1, 0, 5, 9, 0, 0, 9, 8, 0, 0, // + 0, 3, 0, 0, 0, 0, 0, 8, 0, 3, // + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 6, 0, 0, 0, 2, 7, 5, 9, 0, 0); // Do BP - sudoku.runArcConsistency(9,10,PRINT); + csp.runArcConsistency(9, 10); #ifdef METIS - VariableIndexOrdered index(sudoku); + VariableIndexOrdered index(csp); index.print("index"); ofstream os("/Users/dellaert/src/hmetis-1.5-osx-i686/extreme-dual.txt"); index.outputMetisFormat(os); #endif - //sudoku.printSolution(); // don't do it + // Do BP (AC1) + auto domains = csp.runArcConsistency(9, 10); + // csp.printDomains(domains); + Key key99 = Symbol('9', 9); + Domain domain99 = domains.at(key99); + EXPECT_LONGS_EQUAL(2, domain99.nrValues()); + + // Test Creation of a new, simpler CSP + CSP new_csp = csp.partiallyApply(domains); + // 81 new Domains, and still 20 all-diff constraints + EXPECT_LONGS_EQUAL(81 + 20, new_csp.size()); + + // csp.printSolution(); // still don't do it ! 
:-( } /* ************************************************************************* */ -TEST_UNSAFE( Sudoku, AJC_3star_Feb8_2012) -{ - Sudoku sudoku(9, - 9,5,0, 0,0,6, 0,0,0, - 0,8,4, 0,7,0, 0,0,0, - 6,2,0, 5,0,0, 4,0,0, +TEST(Sudoku, AJC_3star_Feb8_2012) { + Sudoku csp(9, // + 9, 5, 0, 0, 0, 6, 0, 0, 0, // + 0, 8, 4, 0, 7, 0, 0, 0, 0, // + 6, 2, 0, 5, 0, 0, 4, 0, 0, // - 0,0,0, 2,9,0, 6,0,0, - 0,9,0, 0,0,0, 0,2,0, - 0,0,2, 0,6,3, 0,0,0, + 0, 0, 0, 2, 9, 0, 6, 0, 0, // + 0, 9, 0, 0, 0, 0, 0, 2, 0, // + 0, 0, 2, 0, 6, 3, 0, 0, 0, // - 0,0,9, 0,0,7, 0,6,8, - 0,0,0, 0,3,0, 2,9,0, - 0,0,0, 1,0,0, 0,3,7); + 0, 0, 9, 0, 0, 7, 0, 6, 8, // + 0, 0, 0, 0, 3, 0, 2, 9, 0, // + 0, 0, 0, 1, 0, 0, 0, 3, 7); - // Do BP - sudoku.runArcConsistency(9,10,PRINT); + // Do BP (AC1) + auto domains = csp.runArcConsistency(9, 10); + // csp.printDomains(domains); + Key key99 = Symbol('9', 9); + Domain domain99 = domains.at(key99); + EXPECT_LONGS_EQUAL(1, domain99.nrValues()); - //sudoku.printSolution(); // don't do it + // Test Creation of a new, simpler CSP + CSP new_csp = csp.partiallyApply(domains); + // Just the 81 new Domains + EXPECT_LONGS_EQUAL(81, new_csp.size()); + + // Check that solution + auto solution = new_csp.optimalAssignment(); + // csp.printAssignment(solution); + EXPECT_LONGS_EQUAL(6, solution.at(key99)); } /* ************************************************************************* */ @@ -216,4 +261,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index 67a0c971e..12bd93416 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -226,7 +226,7 @@ pair testParser(QPSParser parser) { expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0 expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0 return {expected, exampleqp}; -}; +} TEST(QPSolver, ParserSyntaticTest) { auto result = testParser(QPSParser("QPExample.QPS")); diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index 98ec59fe9..572935da3 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -56,7 +56,8 @@ private: bool flag_bump_up_near_zero_probs_; /** concept check by type */ - GTSAM_CONCEPT_LIE_TYPE(T)GTSAM_CONCEPT_TESTABLE_TYPE(T) + GTSAM_CONCEPT_LIE_TYPE(T) + GTSAM_CONCEPT_TESTABLE_TYPE(T) public: diff --git a/gtsam_unstable/slam/README.md b/gtsam_unstable/slam/README.md new file mode 100644 index 000000000..9aa0fed78 --- /dev/null +++ b/gtsam_unstable/slam/README.md @@ -0,0 +1,40 @@ +# SLAM Factors + +## SmartFactors + +These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras. + +### SmartRangeFactor + +An experiment in creating a structure-less 2D range-SLAM factor with range-only measurements. +It uses a sophisticated `triangulate` logic based on circle intersections. + +### SmartStereoProjectionFactor + +Version of `SmartProjectionFactor` for stereo observations, specializes SmartFactorBase for `CAMERA == StereoCamera`. + +TODO: a lot of commented out code and could move a lot to .cpp file. + +### SmartStereoProjectionPoseFactor + +Derives from `SmartStereoProjectionFactor` but adds an array of `Cal3_S2Stereo` calibration objects . 
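+
+A minimal usage sketch of the smart-factor pattern, using `SmartStereoProjectionPoseFactor` as the example. The keys, calibration numbers, and pixel values below are placeholders, and the constructor and `add` signatures should be checked against the header; this is illustrative only, not a drop-in snippet:
+
+```cpp
+#include <boost/make_shared.hpp>
+
+#include <gtsam/geometry/Cal3_S2Stereo.h>
+#include <gtsam/geometry/StereoPoint2.h>
+#include <gtsam/inference/Symbol.h>
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam_unstable/slam/SmartStereoProjectionPoseFactor.h>
+
+using namespace gtsam;
+using symbol_shorthand::X;
+
+void addSmartStereoLandmark(NonlinearFactorGraph& graph) {
+  // Stereo calibration shared by all observations:
+  // fx, fy, skew, cx, cy, baseline (placeholder values).
+  auto K = boost::make_shared<Cal3_S2Stereo>(500.0, 500.0, 0.0, 320.0, 240.0, 0.1);
+  // Isotropic noise on the 3-dimensional stereo measurement (uL, uR, v).
+  auto noise = noiseModel::Isotropic::Sigma(3, 1.0);
+
+  // One smart factor per tracked landmark; no explicit 3D point variable is created.
+  auto smartFactor = boost::make_shared<SmartStereoProjectionPoseFactor>(noise);
+  smartFactor->add(StereoPoint2(320.0, 300.0, 240.0), X(1), K);  // observation from pose X(1)
+  smartFactor->add(StereoPoint2(325.0, 305.0, 238.0), X(2), K);  // observation from pose X(2)
+
+  graph.push_back(smartFactor);  // the factor directly constrains poses X(1) and X(2)
+}
+```
+
+The same pattern (construct with a noise model, then `add` one measurement per observing pose) applies to the other smart factors in this directory, with factor-specific calibration or extrinsic arguments.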
+ +TODO: Again, as no template arguments, we could move a lot to .cpp file. + +### SmartStereoProjectionFactorPP + +Similar `SmartStereoProjectionPoseFactor` but *additionally* adds an array of body_P_cam poses. The dimensions seem to be hardcoded and the types defined in the SmartFactorBase have been re-defined. +The body_P_cam poses are optimized here! + +TODO: See above, same issues as `SmartStereoProjectionPoseFactor`. + +### SmartProjectionPoseFactorRollingShutter + +Is templated on a `CAMERA` type and derives from `SmartProjectionFactor`. + +This factor optimizes two consecutive poses of a body assuming a rolling +shutter model of the camera with given readout time. The factor requires that +values contain (for each 2D observation) two consecutive camera poses from +which the 2D observation pose can be interpolated. + +TODO: the dimensions seem to be hardcoded and the types defined in the SmartFactorBase have been re-defined. Also, possibly a lot of copy/paste computation of things that (should) happen in base class. \ No newline at end of file diff --git a/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h b/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h index 7660ff236..7578b2a83 100644 --- a/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h +++ b/gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h @@ -43,13 +43,14 @@ namespace gtsam { template class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor { - public: + private: + typedef SmartProjectionFactor Base; + typedef SmartProjectionPoseFactorRollingShutter This; typedef typename CAMERA::CalibrationType CALIBRATION; + typedef typename CAMERA::Measurement MEASUREMENT; + typedef typename CAMERA::MeasurementVector MEASUREMENTS; protected: - /// shared pointer to calibration object (one for each observation) - std::vector> K_all_; - /// The keys of the pose of the body (with respect to an external world /// frame): two consecutive poses for each observation std::vector> world_P_body_key_pairs_; @@ -58,21 +59,17 @@ class SmartProjectionPoseFactorRollingShutter /// pair of consecutive poses std::vector alphas_; - /// Pose of the camera in the body frame - std::vector body_P_sensors_; + /// one or more cameras taking observations (fixed poses wrt body + fixed + /// intrinsics) + boost::shared_ptr cameraRig_; + + /// vector of camera Ids (one for each observation, in the same order), + /// identifying which camera took the measurement + FastVector cameraIds_; public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW - /// shorthand for base class type - typedef SmartProjectionFactor> Base; - - /// shorthand for this class - typedef SmartProjectionPoseFactorRollingShutter This; - - /// shorthand for a smart pointer to a factor - typedef boost::shared_ptr shared_ptr; - static const int DimBlock = 12; ///< size of the variable stacking 2 poses from which the observation ///< pose is interpolated @@ -83,22 +80,43 @@ class SmartProjectionPoseFactorRollingShutter typedef std::vector> FBlocks; // vector of F blocks + typedef CAMERA Camera; + typedef CameraSet Cameras; + + /// shorthand for a smart pointer to a factor + typedef boost::shared_ptr shared_ptr; + + /// Default constructor, only for serialization + SmartProjectionPoseFactorRollingShutter() {} + /** * Constructor * @param Isotropic measurement noise + * @param cameraRig set of cameras (fixed poses wrt body and intrinsics) + * taking the measurements * @param params internal parameters of the smart factors */ 
SmartProjectionPoseFactorRollingShutter( const SharedNoiseModel& sharedNoiseModel, + const boost::shared_ptr& cameraRig, const SmartProjectionParams& params = SmartProjectionParams()) - : Base(sharedNoiseModel, params) {} + : Base(sharedNoiseModel, params), cameraRig_(cameraRig) { + // throw exception if configuration is not supported by this factor + if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY) + throw std::runtime_error( + "SmartProjectionRigFactor: " + "degeneracyMode must be set to ZERO_ON_DEGENERACY"); + if (Base::params_.linearizationMode != gtsam::HESSIAN) + throw std::runtime_error( + "SmartProjectionRigFactor: " + "linearizationMode must be set to HESSIAN"); + } /** Virtual destructor */ ~SmartProjectionPoseFactorRollingShutter() override = default; /** - * add a new measurement, with 2 pose keys, interpolation factor, camera - * (intrinsic and extrinsic) calibration, and observed pixel. + * add a new measurement, with 2 pose keys, interpolation factor, and cameraId * @param measured 2-dimensional location of the projection of a single * landmark in a single view (the measurement), interpolated from the 2 poses * @param world_P_body_key1 key corresponding to the first body poses (time <= @@ -107,13 +125,11 @@ class SmartProjectionPoseFactorRollingShutter * >= time pixel is acquired) * @param alpha interpolation factor in [0,1], such that if alpha = 0 the * interpolated pose is the same as world_P_body_key1 - * @param K (fixed) camera intrinsic calibration - * @param body_P_sensor (fixed) camera extrinsic calibration + * @param cameraId ID of the camera taking the measurement (default 0) */ - void add(const Point2& measured, const Key& world_P_body_key1, + void add(const MEASUREMENT& measured, const Key& world_P_body_key1, const Key& world_P_body_key2, const double& alpha, - const boost::shared_ptr& K, - const Pose3& body_P_sensor = Pose3::identity()) { + const size_t& cameraId = 0) { // store measurements in base class this->measured_.push_back(measured); @@ -133,11 +149,8 @@ class SmartProjectionPoseFactorRollingShutter // store interpolation factor alphas_.push_back(alpha); - // store fixed intrinsic calibration - K_all_.push_back(K); - - // store fixed extrinsics of the camera - body_P_sensors_.push_back(body_P_sensor); + // store id of the camera taking the measurement + cameraIds_.push_back(cameraId); } /** @@ -150,56 +163,36 @@ class SmartProjectionPoseFactorRollingShutter * for the i0-th measurement can be interpolated * @param alphas vector of interpolation params (in [0,1]), one for each * measurement (in the same order) - * @param Ks vector of (fixed) intrinsic calibration objects - * @param body_P_sensors vector of (fixed) extrinsic calibration objects + * @param cameraIds IDs of the cameras taking each measurement (same order as + * the measurements) */ - void add(const Point2Vector& measurements, + void add(const MEASUREMENTS& measurements, const std::vector>& world_P_body_key_pairs, const std::vector& alphas, - const std::vector>& Ks, - const std::vector& body_P_sensors) { - assert(world_P_body_key_pairs.size() == measurements.size()); - assert(world_P_body_key_pairs.size() == alphas.size()); - assert(world_P_body_key_pairs.size() == Ks.size()); + const FastVector& cameraIds = FastVector()) { + if (world_P_body_key_pairs.size() != measurements.size() || + world_P_body_key_pairs.size() != alphas.size() || + (world_P_body_key_pairs.size() != cameraIds.size() && + cameraIds.size() != 0)) { // cameraIds.size()=0 is default + throw std::runtime_error( + 
"SmartProjectionPoseFactorRollingShutter: " + "trying to add inconsistent inputs"); + } + if (cameraIds.size() == 0 && cameraRig_->size() > 1) { + throw std::runtime_error( + "SmartProjectionPoseFactorRollingShutter: " + "camera rig includes multiple camera " + "but add did not input cameraIds"); + } for (size_t i = 0; i < measurements.size(); i++) { add(measurements[i], world_P_body_key_pairs[i].first, - world_P_body_key_pairs[i].second, alphas[i], Ks[i], - body_P_sensors[i]); + world_P_body_key_pairs[i].second, alphas[i], + cameraIds.size() == 0 ? 0 + : cameraIds[i]); // use 0 as default if + // cameraIds was not specified } } - /** - * Variant of the previous "add" function in which we include multiple - * measurements with the same (intrinsic and extrinsic) calibration - * @param measurements vector of the 2m dimensional location of the projection - * of a single landmark in the m views (the measurements) - * @param world_P_body_key_pairs vector where the i-th element contains a pair - * of keys corresponding to the pair of poses from which the observation pose - * for the i0-th measurement can be interpolated - * @param alphas vector of interpolation params (in [0,1]), one for each - * measurement (in the same order) - * @param K (fixed) camera intrinsic calibration (same for all measurements) - * @param body_P_sensor (fixed) camera extrinsic calibration (same for all - * measurements) - */ - void add(const Point2Vector& measurements, - const std::vector>& world_P_body_key_pairs, - const std::vector& alphas, - const boost::shared_ptr& K, - const Pose3& body_P_sensor = Pose3::identity()) { - assert(world_P_body_key_pairs.size() == measurements.size()); - assert(world_P_body_key_pairs.size() == alphas.size()); - for (size_t i = 0; i < measurements.size(); i++) { - add(measurements[i], world_P_body_key_pairs[i].first, - world_P_body_key_pairs[i].second, alphas[i], K, body_P_sensor); - } - } - - /// return the calibration object - const std::vector>& calibration() const { - return K_all_; - } - /// return (for each observation) the keys of the pair of poses from which we /// interpolate const std::vector>& world_P_body_key_pairs() const { @@ -209,8 +202,11 @@ class SmartProjectionPoseFactorRollingShutter /// return the interpolation factors alphas const std::vector& alphas() const { return alphas_; } - /// return the extrinsic camera calibration body_P_sensors - const std::vector& body_P_sensors() const { return body_P_sensors_; } + /// return the calibration object + const boost::shared_ptr& cameraRig() const { return cameraRig_; } + + /// return the calibration object + const FastVector& cameraIds() const { return cameraIds_; } /** * print @@ -221,15 +217,15 @@ class SmartProjectionPoseFactorRollingShutter const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n "; - for (size_t i = 0; i < K_all_.size(); i++) { + for (size_t i = 0; i < cameraIds_.size(); i++) { std::cout << "-- Measurement nr " << i << std::endl; std::cout << " pose1 key: " << keyFormatter(world_P_body_key_pairs_[i].first) << std::endl; std::cout << " pose2 key: " << keyFormatter(world_P_body_key_pairs_[i].second) << std::endl; std::cout << " alpha: " << alphas_[i] << std::endl; - body_P_sensors_[i].print("extrinsic calibration:\n"); - K_all_[i]->print("intrinsic calibration = "); + std::cout << "cameraId: " << cameraIds_[i] << std::endl; + (*cameraRig_)[cameraIds_[i]].print("camera in rig:\n"); } Base::print("", 
keyFormatter); } @@ -257,20 +253,48 @@ class SmartProjectionPoseFactorRollingShutter keyPairsEqual = false; } - double extrinsicCalibrationEqual = true; - if (this->body_P_sensors_.size() == e->body_P_sensors().size()) { - for (size_t i = 0; i < this->body_P_sensors_.size(); i++) { - if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])) { - extrinsicCalibrationEqual = false; - break; - } - } - } else { - extrinsicCalibrationEqual = false; - } + return e && Base::equals(p, tol) && alphas_ == e->alphas() && + keyPairsEqual && cameraRig_->equals(*(e->cameraRig())) && + std::equal(cameraIds_.begin(), cameraIds_.end(), + e->cameraIds().begin()); + } - return e && Base::equals(p, tol) && K_all_ == e->calibration() && - alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual; + /** + * Collect all cameras involved in this factor + * @param values Values structure which must contain camera poses + * corresponding to keys involved in this factor + * @return Cameras + */ + typename Base::Cameras cameras(const Values& values) const override { + typename Base::Cameras cameras; + for (size_t i = 0; i < this->measured_.size(); + i++) { // for each measurement + const Pose3& w_P_body1 = + values.at(world_P_body_key_pairs_[i].first); + const Pose3& w_P_body2 = + values.at(world_P_body_key_pairs_[i].second); + double interpolationFactor = alphas_[i]; + const Pose3& w_P_body = + interpolate(w_P_body1, w_P_body2, interpolationFactor); + const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]]; + const Pose3& body_P_cam = camera_i.pose(); + const Pose3& w_P_cam = w_P_body.compose(body_P_cam); + cameras.emplace_back(w_P_cam, + make_shared( + camera_i.calibration())); + } + return cameras; + } + + /** + * error calculates the error of the factor. 
+ */ + double error(const Values& values) const override { + if (this->active(values)) { + return this->totalReprojectionError(this->cameras(values)); + } else { // else of active flag + return 0.0; + } } /** @@ -305,14 +329,16 @@ class SmartProjectionPoseFactorRollingShutter auto w_P_body = interpolate(w_P_body1, w_P_body2, interpolationFactor, dInterpPose_dPoseBody1, dInterpPose_dPoseBody2); - auto body_P_cam = body_P_sensors_[i]; + const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]]; + auto body_P_cam = camera_i.pose(); auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose); - PinholeCamera camera(w_P_cam, *K_all_[i]); + typename Base::Camera camera( + w_P_cam, make_shared( + camera_i.calibration())); // get jacobians and error vector for current measurement - Point2 reprojectionError_i = - Point2(camera.project(*this->result_, dProject_dPoseCam, Ei) - - this->measured_.at(i)); + Point2 reprojectionError_i = camera.reprojectionError( + *this->result_, this->measured_.at(i), dProject_dPoseCam, Ei); Eigen::Matrix J; // 2 x 12 J.block(0, 0, ZDim, 6) = dProject_dPoseCam * dPoseCam_dInterpPose * @@ -332,7 +358,7 @@ class SmartProjectionPoseFactorRollingShutter /// linearize and return a Hessianfactor that is an approximation of error(p) boost::shared_ptr> createHessianFactor( - const Values& values, const double lambda = 0.0, + const Values& values, const double& lambda = 0.0, bool diagonalDamping = false) const { // we may have multiple observation sharing the same keys (due to the // rolling shutter interpolation), hence the number of unique keys may be @@ -341,19 +367,21 @@ class SmartProjectionPoseFactorRollingShutter this->keys_ .size(); // note: by construction, keys_ only contains unique keys + typename Base::Cameras cameras = this->cameras(values); + // Create structures for Hessian Factors KeyVector js; std::vector Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2); std::vector gs(nrUniqueKeys); if (this->measured_.size() != - this->cameras(values).size()) // 1 observation per interpolated camera + cameras.size()) // 1 observation per interpolated camera throw std::runtime_error( "SmartProjectionPoseFactorRollingShutter: " "measured_.size() inconsistent with input"); // triangulate 3D point at given linearization point - this->triangulateSafe(this->cameras(values)); + this->triangulateSafe(cameras); if (!this->result_) { // failed: return "empty/zero" Hessian if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) { @@ -378,7 +406,7 @@ class SmartProjectionPoseFactorRollingShutter for (size_t i = 0; i < Fs.size(); i++) Fs[i] = this->noiseModel_->Whiten(Fs[i]); - Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping); + Matrix3 P = Cameras::PointCov(E, lambda, diagonalDamping); // Collect all the key pairs: these are the keys that correspond to the // blocks in Fs (on which we apply the Schur Complement) @@ -399,46 +427,6 @@ class SmartProjectionPoseFactorRollingShutter this->keys_, augmentedHessianUniqueKeys); } - /** - * error calculates the error of the factor. 
- */ - double error(const Values& values) const override { - if (this->active(values)) { - return this->totalReprojectionError(this->cameras(values)); - } else { // else of active flag - return 0.0; - } - } - - /** - * Collect all cameras involved in this factor - * @param values Values structure which must contain camera poses - * corresponding to keys involved in this factor - * @return Cameras - */ - typename Base::Cameras cameras(const Values& values) const override { - size_t numViews = this->measured_.size(); - assert(numViews == K_all_.size()); - assert(numViews == alphas_.size()); - assert(numViews == body_P_sensors_.size()); - assert(numViews == world_P_body_key_pairs_.size()); - - typename Base::Cameras cameras; - for (size_t i = 0; i < numViews; i++) { // for each measurement - const Pose3& w_P_body1 = - values.at(world_P_body_key_pairs_[i].first); - const Pose3& w_P_body2 = - values.at(world_P_body_key_pairs_[i].second); - double interpolationFactor = alphas_[i]; - const Pose3& w_P_body = - interpolate(w_P_body1, w_P_body2, interpolationFactor); - const Pose3& body_P_cam = body_P_sensors_[i]; - const Pose3& w_P_cam = w_P_body.compose(body_P_cam); - cameras.emplace_back(w_P_cam, K_all_[i]); - } - return cameras; - } - /** * Linearize to Gaussian Factor (possibly adding a damping factor Lambda for * LM) @@ -447,7 +435,7 @@ class SmartProjectionPoseFactorRollingShutter * @return a Gaussian factor */ boost::shared_ptr linearizeDamped( - const Values& values, const double lambda = 0.0) const { + const Values& values, const double& lambda = 0.0) const { // depending on flag set on construction we may linearize to different // linear factors switch (this->params_.linearizationMode) { @@ -455,8 +443,8 @@ class SmartProjectionPoseFactorRollingShutter return this->createHessianFactor(values, lambda); default: throw std::runtime_error( - "SmartProjectionPoseFactorRollingShutter: unknown linearization " - "mode"); + "SmartProjectionPoseFactorRollingShutter: " + "unknown linearization mode"); } } @@ -472,7 +460,6 @@ class SmartProjectionPoseFactorRollingShutter template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); - ar& BOOST_SERIALIZATION_NVP(K_all_); } }; // end of class declaration diff --git a/gtsam_unstable/slam/SmartStereoProjectionFactor.h b/gtsam_unstable/slam/SmartStereoProjectionFactor.h index 52fd99356..88e112998 100644 --- a/gtsam_unstable/slam/SmartStereoProjectionFactor.h +++ b/gtsam_unstable/slam/SmartStereoProjectionFactor.h @@ -11,7 +11,7 @@ /** * @file SmartStereoProjectionFactor.h - * @brief Smart stereo factor on StereoCameras (pose + calibration) + * @brief Smart stereo factor on StereoCameras (pose) * @author Luca Carlone * @author Zsolt Kira * @author Frank Dellaert @@ -447,23 +447,23 @@ public: } /** - * This corrects the Jacobians and error vector for the case in which the right pixel in the monocular camera is missing (nan) + * This corrects the Jacobians and error vector for the case in which the + * right 2D measurement in the monocular camera is missing (nan). 
*/ - void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, - boost::optional Fs = boost::none, - boost::optional E = boost::none) const override - { + void correctForMissingMeasurements( + const Cameras& cameras, Vector& ue, + boost::optional Fs = boost::none, + boost::optional E = boost::none) const override { // when using stereo cameras, some of the measurements might be missing: - for(size_t i=0; i < cameras.size(); i++){ + for (size_t i = 0; i < cameras.size(); i++) { const StereoPoint2& z = measured_.at(i); - if(std::isnan(z.uR())) // if the right pixel is invalid + if (std::isnan(z.uR())) // if the right 2D measurement is invalid { - if(Fs){ // delete influence of right point on jacobian Fs + if (Fs) { // delete influence of right point on jacobian Fs MatrixZD& Fi = Fs->at(i); - for(size_t ii=0; iirow(ZDim * i + 1) = Matrix::Zero(1, E->cols()); // set the corresponding entry of vector ue to zero diff --git a/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h b/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h index 25be48b0f..ce6df15cb 100644 --- a/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h +++ b/gtsam_unstable/slam/SmartStereoProjectionFactorPP.h @@ -33,9 +33,11 @@ namespace gtsam { */ /** - * This factor optimizes the pose of the body as well as the extrinsic camera calibration (pose of camera wrt body). - * Each camera may have its own extrinsic calibration or the same calibration can be shared by multiple cameras. - * This factor requires that values contain the involved poses and extrinsics (both are Pose3 variables). + * This factor optimizes the pose of the body as well as the extrinsic camera + * calibration (pose of camera wrt body). Each camera may have its own extrinsic + * calibration or the same calibration can be shared by multiple cameras. This + * factor requires that values contain the involved poses and extrinsics (both + * are Pose3 variables). 
* @addtogroup SLAM */ class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor { diff --git a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp index 0b94d2c3f..d7e72d129 100644 --- a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp +++ b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp @@ -61,10 +61,13 @@ static double interp_factor1 = 0.3; static double interp_factor2 = 0.4; static double interp_factor3 = 0.5; +static size_t cameraId1 = 0; + /* ************************************************************************* */ // default Cal3_S2 poses with rolling shutter effect namespace vanillaPoseRS { typedef PinholePose Camera; +typedef CameraSet Cameras; static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h)); Pose3 interp_pose1 = interpolate(level_pose, pose_right, interp_factor1); Pose3 interp_pose2 = interpolate(pose_right, pose_above, interp_factor2); @@ -72,6 +75,9 @@ Pose3 interp_pose3 = interpolate(pose_above, level_pose, interp_factor3); Camera cam1(interp_pose1, sharedK); Camera cam2(interp_pose2, sharedK); Camera cam3(interp_pose3, sharedK); +SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with RS factors } // namespace vanillaPoseRS LevenbergMarquardtParams lmParams; @@ -80,26 +86,35 @@ typedef SmartProjectionPoseFactorRollingShutter> /* ************************************************************************* */ TEST(SmartProjectionPoseFactorRollingShutter, Constructor) { - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); + using namespace vanillaPoseRS; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); } /* ************************************************************************* */ TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) { - SmartProjectionParams params; + using namespace vanillaPoseRS; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); params.setRankTolerance(rankTol); - SmartFactorRS factor1(model, params); + SmartFactorRS factor1(model, cameraRig, params); } /* ************************************************************************* */ TEST(SmartProjectionPoseFactorRollingShutter, add) { - using namespace vanillaPose; - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); - factor1->add(measurement1, x1, x2, interp_factor, sharedK, body_P_sensor); + using namespace vanillaPoseRS; + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurement1, x1, x2, interp_factor); } /* ************************************************************************* */ TEST(SmartProjectionPoseFactorRollingShutter, Equals) { - using namespace vanillaPose; + using namespace vanillaPoseRS; // create fake measurements Point2Vector measurements; @@ -112,68 +127,88 @@ TEST(SmartProjectionPoseFactorRollingShutter, Equals) { key_pairs.push_back(std::make_pair(x2, x3)); key_pairs.push_back(std::make_pair(x3, x4)); - std::vector> intrinsicCalibrations; - intrinsicCalibrations.push_back(sharedK); - intrinsicCalibrations.push_back(sharedK); - intrinsicCalibrations.push_back(sharedK); - - std::vector 
extrinsicCalibrations; - extrinsicCalibrations.push_back(body_P_sensor); - extrinsicCalibrations.push_back(body_P_sensor); - extrinsicCalibrations.push_back(body_P_sensor); - std::vector interp_factors; interp_factors.push_back(interp_factor1); interp_factors.push_back(interp_factor2); interp_factors.push_back(interp_factor3); + FastVector cameraIds{0, 0, 0}; + + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_P_sensor, sharedK)); + // create by adding a batch of measurements with a bunch of calibrations - SmartFactorRS::shared_ptr factor2(new SmartFactorRS(model)); - factor2->add(measurements, key_pairs, interp_factors, intrinsicCalibrations, - extrinsicCalibrations); + SmartFactorRS::shared_ptr factor2( + new SmartFactorRS(model, cameraRig, params)); + factor2->add(measurements, key_pairs, interp_factors, cameraIds); // create by adding a batch of measurements with a single calibrations - SmartFactorRS::shared_ptr factor3(new SmartFactorRS(model)); - factor3->add(measurements, key_pairs, interp_factors, sharedK, body_P_sensor); + SmartFactorRS::shared_ptr factor3( + new SmartFactorRS(model, cameraRig, params)); + factor3->add(measurements, key_pairs, interp_factors, cameraIds); { // create equal factors and show equal returns true - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); - factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); - factor1->add(measurement2, x2, x3, interp_factor2, sharedK, body_P_sensor); - factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurement1, x1, x2, interp_factor1, cameraId1); + factor1->add(measurement2, x2, x3, interp_factor2, cameraId1); + factor1->add(measurement3, x3, x4, interp_factor3, cameraId1); + + EXPECT(factor1->equals(*factor2)); + EXPECT(factor1->equals(*factor3)); + } + { // create equal factors and show equal returns true (use default cameraId) + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurement1, x1, x2, interp_factor1); + factor1->add(measurement2, x2, x3, interp_factor2); + factor1->add(measurement3, x3, x4, interp_factor3); + + EXPECT(factor1->equals(*factor2)); + EXPECT(factor1->equals(*factor3)); + } + { // create equal factors and show equal returns true (use default cameraId) + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurements, key_pairs, interp_factors); EXPECT(factor1->equals(*factor2)); EXPECT(factor1->equals(*factor3)); } { // create slightly different factors (different keys) and show equal - // returns false - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); - factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); - factor1->add(measurement2, x2, x2, interp_factor2, sharedK, - body_P_sensor); // different! - factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); + // returns false (use default cameraIds) + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurement1, x1, x2, interp_factor1, cameraId1); + factor1->add(measurement2, x2, x2, interp_factor2, + cameraId1); // different! 
+ factor1->add(measurement3, x3, x4, interp_factor3, cameraId1); EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor3)); } { // create slightly different factors (different extrinsics) and show equal // returns false - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); - factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); - factor1->add(measurement2, x2, x3, interp_factor2, sharedK, - body_P_sensor * body_P_sensor); // different! - factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); + boost::shared_ptr cameraRig2(new Cameras()); + cameraRig2->push_back(Camera(body_P_sensor * body_P_sensor, sharedK)); + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig2, params)); + factor1->add(measurement1, x1, x2, interp_factor1, cameraId1); + factor1->add(measurement2, x2, x3, interp_factor2, + cameraId1); // different! + factor1->add(measurement3, x3, x4, interp_factor3, cameraId1); EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor3)); } { // create slightly different factors (different interp factors) and show // equal returns false - SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); - factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); - factor1->add(measurement2, x2, x3, interp_factor1, sharedK, - body_P_sensor); // different! - factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); + SmartFactorRS::shared_ptr factor1( + new SmartFactorRS(model, cameraRig, params)); + factor1->add(measurement1, x1, x2, interp_factor1, cameraId1); + factor1->add(measurement2, x2, x3, interp_factor1, + cameraId1); // different! + factor1->add(measurement3, x3, x4, interp_factor3, cameraId1); EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor3)); @@ -197,9 +232,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noiselessErrorAndJacobians) { Point2 level_uv_right = cam2.project(landmark1); Pose3 body_P_sensorId = Pose3::identity(); - SmartFactorRS factor(model); - factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorId); - factor.add(level_uv_right, x2, x3, interp_factor2, sharedK, body_P_sensorId); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_P_sensorId, sharedK)); + + SmartFactorRS factor(model, cameraRig, params); + factor.add(level_uv, x1, x2, interp_factor1); + factor.add(level_uv_right, x2, x3, interp_factor2); Values values; // it's a pose factor, hence these are poses values.insert(x1, level_pose); @@ -272,10 +310,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noisyErrorAndJacobians) { Point2 level_uv_right = cam2.project(landmark1); Pose3 body_P_sensorNonId = body_P_sensor; - SmartFactorRS factor(model); - factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorNonId); - factor.add(level_uv_right, x2, x3, interp_factor2, sharedK, - body_P_sensorNonId); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_P_sensorNonId, sharedK)); + + SmartFactorRS factor(model, cameraRig, params); + factor.add(level_uv, x1, x2, interp_factor1); + factor.add(level_uv_right, x2, x3, interp_factor2); Values values; // it's a pose factor, hence these are poses values.insert(x1, level_pose); @@ -364,14 +404,20 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) { interp_factors.push_back(interp_factor2); interp_factors.push_back(interp_factor3); - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); - 
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); - SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model)); - smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1, key_pairs, interp_factors); - SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model)); - smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor2( + new SmartFactorRS(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, key_pairs, interp_factors); + + SmartFactorRS::shared_ptr smartFactor3( + new SmartFactorRS(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, key_pairs, interp_factors); const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); @@ -411,6 +457,170 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) { EXPECT(assert_equal(pose_above, result.at(x3), 1e-6)); } +/* *************************************************************************/ +TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam) { + using namespace vanillaPoseRS; + Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3; + + // Project three landmarks into three cameras + projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_lmk1); + projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_lmk2); + projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_lmk3); + + // create inputs + std::vector> key_pairs; + key_pairs.push_back(std::make_pair(x1, x2)); + key_pairs.push_back(std::make_pair(x2, x3)); + key_pairs.push_back(std::make_pair(x3, x1)); + + std::vector interp_factors; + interp_factors.push_back(interp_factor1); + interp_factors.push_back(interp_factor2); + interp_factors.push_back(interp_factor3); + + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_P_sensor, sharedK)); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {1, 1, 1}); + + SmartFactorRS::shared_ptr smartFactor2( + new SmartFactorRS(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {1, 1, 1}); + + SmartFactorRS::shared_ptr smartFactor3( + new SmartFactorRS(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {1, 1, 1}); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, level_pose, noisePrior); + graph.addPrior(x2, pose_right, noisePrior); + + Values groundTruth; + groundTruth.insert(x1, level_pose); + groundTruth.insert(x2, pose_right); + groundTruth.insert(x3, pose_above); // pose above is the pose of the camera + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, 
level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT( // check that the pose is actually noisy + assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, + -0.0313952598, -0.000986635786, 0.0314107591, + -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-6)); +} + +/* *************************************************************************/ +TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam2) { + using namespace vanillaPoseRS; + + Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3; + + // create arbitrary body_T_sensor (transforms from sensor to body) + Pose3 body_T_sensor1 = Pose3(Rot3::Ypr(-0.03, 0., 0.01), Point3(1, 1, 1)); + Pose3 body_T_sensor2 = Pose3(Rot3::Ypr(-0.1, 0., 0.05), Point3(0, 0, 1)); + Pose3 body_T_sensor3 = Pose3(Rot3::Ypr(-0.3, 0., -0.05), Point3(0, 1, 1)); + + Camera camera1(interp_pose1 * body_T_sensor1, sharedK); + Camera camera2(interp_pose2 * body_T_sensor2, sharedK); + Camera camera3(interp_pose3 * body_T_sensor3, sharedK); + + // Project three landmarks into three cameras + projectToMultipleCameras(camera1, camera2, camera3, landmark1, + measurements_lmk1); + projectToMultipleCameras(camera1, camera2, camera3, landmark2, + measurements_lmk2); + projectToMultipleCameras(camera1, camera2, camera3, landmark3, + measurements_lmk3); + + // create inputs + std::vector> key_pairs; + key_pairs.push_back(std::make_pair(x1, x2)); + key_pairs.push_back(std::make_pair(x2, x3)); + key_pairs.push_back(std::make_pair(x3, x1)); + + std::vector interp_factors; + interp_factors.push_back(interp_factor1); + interp_factors.push_back(interp_factor2); + interp_factors.push_back(interp_factor3); + + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_T_sensor1, sharedK)); + cameraRig->push_back(Camera(body_T_sensor2, sharedK)); + cameraRig->push_back(Camera(body_T_sensor3, sharedK)); + + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {0, 1, 2}); + + SmartFactorRS::shared_ptr smartFactor2( + new SmartFactorRS(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {0, 1, 2}); + + SmartFactorRS::shared_ptr smartFactor3( + new SmartFactorRS(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {0, 1, 2}); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, level_pose, noisePrior); + graph.addPrior(x2, pose_right, noisePrior); + + Values groundTruth; + groundTruth.insert(x1, level_pose); + groundTruth.insert(x2, pose_right); + groundTruth.insert(x3, pose_above); // pose above is the pose of the camera + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; 
+ values.insert(x1, level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT( // check that the pose is actually noisy + assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, + -0.0313952598, -0.000986635786, 0.0314107591, + -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-4)); +} + /* *************************************************************************/ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) { // here we replicate a test in SmartProjectionPoseFactor by setting @@ -418,7 +628,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) { // falls back to standard pixel measurements) Note: this is a quite extreme // test since in typical camera you would not have more than 1 measurement per // landmark at each interpolated pose - using namespace vanillaPose; + using namespace vanillaPoseRS; // Default cameras for simple derivatives static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); @@ -438,15 +648,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) { measurements_lmk1.push_back(cam1.project(landmark1)); measurements_lmk1.push_back(cam2.project(landmark1)); - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); - double interp_factor = 0; // equivalent to measurement taken at pose 1 - smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor, sharedKSimple, - body_P_sensorId); - interp_factor = 1; // equivalent to measurement taken at pose 2 - smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor, sharedKSimple, - body_P_sensorId); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple)); - SmartFactor::Cameras cameras; + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + double interp_factor = 0; // equivalent to measurement taken at pose 1 + smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor); + interp_factor = 1; // equivalent to measurement taken at pose 2 + smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor); + + SmartFactorRS::Cameras cameras; cameras.push_back(cam1); cameras.push_back(cam2); @@ -534,14 +746,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_EPI) { params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist); params.setEnableEPI(true); - SmartFactorRS smartFactor1(model, params); - smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); - SmartFactorRS smartFactor2(model, params); - smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK); + SmartFactorRS smartFactor1(model, cameraRig, params); + smartFactor1.add(measurements_lmk1, key_pairs, interp_factors); - SmartFactorRS smartFactor3(model, params); - smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK); + SmartFactorRS smartFactor2(model, cameraRig, params); + smartFactor2.add(measurements_lmk2, key_pairs, interp_factors); + + SmartFactorRS smartFactor3(model, cameraRig, params); + smartFactor3.add(measurements_lmk3, key_pairs, interp_factors); const 
SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); @@ -594,18 +809,23 @@ TEST(SmartProjectionPoseFactorRollingShutter, SmartProjectionParams params; params.setRankTolerance(1.0); params.setLinearizationMode(gtsam::HESSIAN); - params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY); + // params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY); // this would give an + // exception as expected + params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist); params.setEnableEPI(false); - SmartFactorRS smartFactor1(model, params); - smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); - SmartFactorRS smartFactor2(model, params); - smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK); + SmartFactorRS smartFactor1(model, cameraRig, params); + smartFactor1.add(measurements_lmk1, key_pairs, interp_factors); - SmartFactorRS smartFactor3(model, params); - smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK); + SmartFactorRS smartFactor2(model, cameraRig, params); + smartFactor2.add(measurements_lmk2, key_pairs, interp_factors); + + SmartFactorRS smartFactor3(model, cameraRig, params); + smartFactor3.add(measurements_lmk3, key_pairs, interp_factors); const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); @@ -673,17 +893,24 @@ TEST(SmartProjectionPoseFactorRollingShutter, params.setDynamicOutlierRejectionThreshold(dynamicOutlierRejectionThreshold); params.setEnableEPI(false); - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model, params)); - smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); - SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model, params)); - smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1, key_pairs, interp_factors); - SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model, params)); - smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor2( + new SmartFactorRS(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, key_pairs, interp_factors); - SmartFactorRS::shared_ptr smartFactor4(new SmartFactorRS(model, params)); - smartFactor4->add(measurements_lmk4, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor3( + new SmartFactorRS(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, key_pairs, interp_factors); + + SmartFactorRS::shared_ptr smartFactor4( + new SmartFactorRS(model, cameraRig, params)); + smartFactor4->add(measurements_lmk4, key_pairs, interp_factors); const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); @@ -733,8 +960,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors.push_back(interp_factor2); interp_factors.push_back(interp_factor3); - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); - smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + 
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors); Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), Point3(0.1, 0.1, 0.1)); // smaller noise @@ -870,9 +1101,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors.push_back(interp_factor3); interp_factors.push_back(interp_factor1); - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); - smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors, - sharedK); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); + smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors); Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), Point3(0.1, 0.1, 0.1)); // smaller noise @@ -1026,15 +1260,21 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors_redundant.push_back( interp_factors.at(0)); // we readd the first interp factor - SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); + boost::shared_ptr cameraRig(new Cameras()); + cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + + SmartFactorRS::shared_ptr smartFactor1( + new SmartFactorRS(model, cameraRig, params)); smartFactor1->add(measurements_lmk1_redundant, key_pairs_redundant, - interp_factors_redundant, sharedK); + interp_factors_redundant); - SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model)); - smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor2( + new SmartFactorRS(model, cameraRig, params)); + smartFactor2->add(measurements_lmk2, key_pairs, interp_factors); - SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model)); - smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK); + SmartFactorRS::shared_ptr smartFactor3( + new SmartFactorRS(model, cameraRig, params)); + smartFactor3->add(measurements_lmk3, key_pairs, interp_factors); const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); @@ -1076,16 +1316,20 @@ TEST(SmartProjectionPoseFactorRollingShutter, #ifndef DISABLE_TIMING #include -// -Total: 0 CPU (0 times, 0 wall, 0.04 children, min: 0 max: 0) -//| -SF RS LINEARIZE: 0.02 CPU (1000 times, 0.017244 wall, 0.02 children, min: -// 0 max: 0) | -RS LINEARIZE: 0.02 CPU (1000 times, 0.009035 wall, 0.02 -// children, min: 0 max: 0) +//-Total: 0 CPU (0 times, 0 wall, 0.21 children, min: 0 max: 0) +//| -SF RS LINEARIZE: 0.14 CPU +//(10000 times, 0.131202 wall, 0.14 children, min: 0 max: 0) +//| -RS LINEARIZE: 0.06 CPU +//(10000 times, 0.066951 wall, 0.06 children, min: 0 max: 0) /* *************************************************************************/ TEST(SmartProjectionPoseFactorRollingShutter, timing) { using namespace vanillaPose; // Default cameras for simple derivatives static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); + SmartProjectionParams params( + gtsam::HESSIAN, + gtsam::ZERO_ON_DEGENERACY); // only config that works with RS factors Rot3 R = Rot3::identity(); Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); @@ -1102,16 +1346,18 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) { measurements_lmk1.push_back(cam1.project(landmark1)); measurements_lmk1.push_back(cam2.project(landmark1)); - size_t nrTests = 1000; + size_t nrTests = 10000; for (size_t i = 0; i < nrTests; i++) { - SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(model)); + boost::shared_ptr cameraRig(new 
Cameras());
+    cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple));
+
+    SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(
+        model, cameraRig, params));
    double interp_factor = 0;  // equivalent to measurement taken at pose 1
-    smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor,
-                       sharedKSimple, body_P_sensorId);
+    smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor);
    interp_factor = 1;  // equivalent to measurement taken at pose 2
-    smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor,
-                       sharedKSimple, body_P_sensorId);
+    smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor);
    Values values;
    values.insert(x1, pose1);
@@ -1122,7 +1368,8 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
  }
  for (size_t i = 0; i < nrTests; i++) {
-    SmartFactor::shared_ptr smartFactor(new SmartFactor(model, sharedKSimple));
+    SmartFactor::shared_ptr smartFactor(
+        new SmartFactor(model, sharedKSimple, params));
    smartFactor->add(measurements_lmk1[0], x1);
    smartFactor->add(measurements_lmk1[1], x2);
@@ -1137,6 +1384,105 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
}
#endif
+#include <gtsam/geometry/SphericalCamera.h>
+/* ************************************************************************* */
+// spherical Camera with rolling shutter effect
+namespace sphericalCameraRS {
+typedef SphericalCamera Camera;
+typedef CameraSet<Camera> Cameras;
+typedef SmartProjectionPoseFactorRollingShutter<Camera> SmartFactorRS_spherical;
+Pose3 interp_pose1 = interpolate(level_pose, pose_right, interp_factor1);
+Pose3 interp_pose2 = interpolate(pose_right, pose_above, interp_factor2);
+Pose3 interp_pose3 = interpolate(pose_above, level_pose, interp_factor3);
+static EmptyCal::shared_ptr emptyK;
+Camera cam1(interp_pose1, emptyK);
+Camera cam2(interp_pose2, emptyK);
+Camera cam3(interp_pose3, emptyK);
+}  // namespace sphericalCameraRS
+
+/* *************************************************************************/
+TEST(SmartProjectionPoseFactorRollingShutter,
+     optimization_3poses_sphericalCameras) {
+  using namespace sphericalCameraRS;
+  std::vector<Unit3> measurements_lmk1, measurements_lmk2, measurements_lmk3;
+
+  // Project three landmarks into three cameras
+  projectToMultipleCameras(cam1, cam2, cam3, landmark1,
+                           measurements_lmk1);
+  projectToMultipleCameras(cam1, cam2, cam3, landmark2,
+                           measurements_lmk2);
+  projectToMultipleCameras(cam1, cam2, cam3, landmark3,
+                           measurements_lmk3);
+
+  // create inputs
+  std::vector<std::pair<Key, Key>> key_pairs;
+  key_pairs.push_back(std::make_pair(x1, x2));
+  key_pairs.push_back(std::make_pair(x2, x3));
+  key_pairs.push_back(std::make_pair(x3, x1));
+
+  std::vector<double> interp_factors;
+  interp_factors.push_back(interp_factor1);
+  interp_factors.push_back(interp_factor2);
+  interp_factors.push_back(interp_factor3);
+
+  SmartProjectionParams params(
+      gtsam::HESSIAN,
+      gtsam::ZERO_ON_DEGENERACY);  // only config that works with RS factors
+  params.setRankTolerance(0.1);
+
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), emptyK));
+
+  SmartFactorRS_spherical::shared_ptr smartFactor1(
+      new SmartFactorRS_spherical(model, cameraRig, params));
+  smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
+
+  SmartFactorRS_spherical::shared_ptr smartFactor2(
+      new SmartFactorRS_spherical(model, cameraRig, params));
+  smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
+
+  SmartFactorRS_spherical::shared_ptr smartFactor3(
+      new SmartFactorRS_spherical(model, cameraRig, params));
+  smartFactor3->add(measurements_lmk3, key_pairs,
interp_factors); + + const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); + + NonlinearFactorGraph graph; + graph.push_back(smartFactor1); + graph.push_back(smartFactor2); + graph.push_back(smartFactor3); + graph.addPrior(x1, level_pose, noisePrior); + graph.addPrior(x2, pose_right, noisePrior); + + Values groundTruth; + groundTruth.insert(x1, level_pose); + groundTruth.insert(x2, pose_right); + groundTruth.insert(x3, pose_above); + DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9); + + // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), + // Point3(0.5,0.1,0.3)); // noise from regular projection factor test below + Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), + Point3(0.1, 0.1, 0.1)); // smaller noise + Values values; + values.insert(x1, level_pose); + values.insert(x2, pose_right); + // initialize third pose with some noise, we expect it to move back to + // original pose_above + values.insert(x3, pose_above * noise_pose); + EXPECT( // check that the pose is actually noisy + assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656, + -0.0313952598, -0.000986635786, 0.0314107591, + -0.999013364, -0.0313952598), + Point3(0.1, -0.1, 1.9)), + values.at(x3))); + + Values result; + LevenbergMarquardtOptimizer optimizer(graph, values, lmParams); + result = optimizer.optimize(); + EXPECT(assert_equal(pose_above, result.at(x3), 1e-6)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index d1bfc5740..d3b20e312 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -67,8 +67,10 @@ set(interface_headers ${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i ) +set(GTSAM_PYTHON_TARGET gtsam_py) +set(GTSAM_PYTHON_UNSTABLE_TARGET gtsam_unstable_py) -pybind_wrap(gtsam_py # target +pybind_wrap(${GTSAM_PYTHON_TARGET} # target "${interface_headers}" # interface_headers "gtsam.cpp" # generated_cpp "gtsam" # module_name @@ -80,7 +82,7 @@ pybind_wrap(gtsam_py # target ON # use_boost ) -set_target_properties(gtsam_py PROPERTIES +set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" INSTALL_RPATH_USE_LINK_PATH TRUE OUTPUT_NAME "gtsam" @@ -89,18 +91,25 @@ set_target_properties(gtsam_py PROPERTIES RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name ) +# Set the path for the GTSAM python module set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) -# Symlink all tests .py files to build folder. -create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" +# Copy all python files to build folder. +copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" "${GTSAM_MODULE_PATH}") +# Hack to get python test files copied every time they are modified +file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py") +foreach(test_file ${GTSAM_PYTHON_TEST_FILES}) + configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY) +endforeach() + # Common directory for data/datasets stored with the package. # This will store the data in the Python site package directly. 
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")
# Add gtsam as a dependency to the install target
-set(GTSAM_PYTHON_DEPENDENCIES gtsam_py)
+set(GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_TARGET})
if(GTSAM_UNSTABLE_BUILD_PYTHON)
@@ -124,7 +133,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
    gtsam::CameraSetCal3Fisheye
    gtsam::KeyPairDoubleMap)
-  pybind_wrap(gtsam_unstable_py # target
+  pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target
    ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
    "gtsam_unstable.cpp" # generated_cpp
    "gtsam_unstable" # module_name
@@ -136,7 +145,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
    ON # use_boost
  )
-  set_target_properties(gtsam_unstable_py PROPERTIES
+  set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES
    INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
    INSTALL_RPATH_USE_LINK_PATH TRUE
    OUTPUT_NAME "gtsam_unstable"
@@ -147,12 +156,18 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
  set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)
-  # Symlink all tests .py files to build folder.
-  create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
+  # Copy all python files to build folder.
+  copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
                  "${GTSAM_UNSTABLE_MODULE_PATH}")
+  # Hack to get python test files copied every time they are modified
+  file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py")
+  foreach(test_file ${GTSAM_UNSTABLE_PYTHON_TEST_FILES})
+    configure_file(${test_file} "${GTSAM_UNSTABLE_MODULE_PATH}/tests/${test_file}" COPYONLY)
+  endforeach()
+
  # Add gtsam_unstable to the install target
-  list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py)
+  list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})
endif()
diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py
index 70a00c3dc..d00e47b65 100644
--- a/python/gtsam/__init__.py
+++ b/python/gtsam/__init__.py
@@ -1,6 +1,12 @@
-from . import utils
-from .gtsam import *
-from .utils import findExampleDataFile
+"""Module definition file for GTSAM"""
+
+# pylint: disable=import-outside-toplevel, global-variable-not-assigned, possibly-unused-variable, import-error, import-self
+
+import sys
+
+from gtsam import gtsam, utils
+from gtsam.gtsam import *
+from gtsam.utils import findExampleDataFile
def _init():
@@ -13,7 +19,7 @@ def _init():
    def Point2(x=np.nan, y=np.nan):
        """Shim for the deleted Point2 type."""
        if isinstance(x, np.ndarray):
-            assert x.shape == (2,), "Point2 takes 2-vector"
+            assert x.shape == (2, ), "Point2 takes 2-vector"
            return x  # "copy constructor"
        return np.array([x, y], dtype=float)
@@ -22,7 +28,7 @@ def _init():
    def Point3(x=np.nan, y=np.nan, z=np.nan):
        """Shim for the deleted Point3 type."""
        if isinstance(x, np.ndarray):
-            assert x.shape == (3,), "Point3 takes 3-vector"
+            assert x.shape == (3, ), "Point3 takes 3-vector"
            return x  # "copy constructor"
        return np.array([x, y, z], dtype=float)
diff --git a/python/gtsam/examples/CustomFactorExample.py b/python/gtsam/examples/CustomFactorExample.py
index c7fe1e202..36c1e003d 100644
--- a/python/gtsam/examples/CustomFactorExample.py
+++ b/python/gtsam/examples/CustomFactorExample.py
@@ -9,15 +9,17 @@ CustomFactor demo that simulates a 1-D sensor fusion task.
Author: Fan Jiang, Frank Dellaert """ +from functools import partial +from typing import List, Optional + import gtsam import numpy as np -from typing import List, Optional -from functools import partial +I = np.eye(1) -def simulate_car(): - # Simulate a car for one second +def simulate_car() -> List[float]: + """Simulate a car for one second""" x0 = 0 dt = 0.25 # 4 Hz, typical GPS v = 144 * 1000 / 3600 # 144 km/hour = 90mph, pretty fast @@ -26,46 +28,9 @@ def simulate_car(): return x -x = simulate_car() -print(f"Simulated car trajectory: {x}") - -# %% -add_noise = True # set this to False to run with "perfect" measurements - -# GPS measurements -sigma_gps = 3.0 # assume GPS is +/- 3m -g = [x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0) - for k in range(5)] - -# Odometry measurements -sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz -o = [x[k + 1] - x[k] + (np.random.normal(scale=sigma_odo) if add_noise else 0) - for k in range(4)] - -# Landmark measurements: -sigma_lm = 1 # assume landmark measurement is accurate up to 1m - -# Assume first landmark is at x=5, we measure it at time k=0 -lm_0 = 5.0 -z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0) - -# Assume other landmark is at x=28, we measure it at time k=3 -lm_3 = 28.0 -z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0) - -unknown = [gtsam.symbol('x', k) for k in range(5)] - -print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown))) - -# We now can use nonlinear factor graphs -factor_graph = gtsam.NonlinearFactorGraph() - -# Add factors for GPS measurements -I = np.eye(1) -gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps) - - -def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]): +def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, + values: gtsam.Values, + jacobians: Optional[List[np.ndarray]]) -> float: """GPS Factor error function :param measurement: GPS measurement, to be filled with `partial` :param this: gtsam.CustomFactor handle @@ -82,36 +47,9 @@ def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobia return error -# Add the GPS factors -for k in range(5): - gf = gtsam.CustomFactor(gps_model, [unknown[k]], partial(error_gps, np.array([g[k]]))) - factor_graph.add(gf) - -# New Values container -v = gtsam.Values() - -# Add initial estimates to the Values container -for i in range(5): - v.insert(unknown[i], np.array([0.0])) - -# Initialize optimizer -params = gtsam.GaussNewtonParams() -optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) - -# Optimize the factor graph -result = optimizer.optimize() - -# calculate the error from ground truth -error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)]) - -print("Result with only GPS") -print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}") - -# Adding odometry will improve things a lot -odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo) - - -def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]): +def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, + values: gtsam.Values, + jacobians: Optional[List[np.ndarray]]) -> float: """Odometry Factor error function :param measurement: Odometry measurement, to be filled with `partial` :param this: gtsam.CustomFactor handle @@ -130,25 +68,9 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobi 
return error -for k in range(4): - odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]], partial(error_odom, np.array([o[k]]))) - factor_graph.add(odof) - -params = gtsam.GaussNewtonParams() -optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) - -result = optimizer.optimize() - -error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)]) - -print("Result with GPS+Odometry") -print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}") - -# This is great, but GPS noise is still apparent, so now we add the two landmarks -lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm) - - -def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]): +def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, + values: gtsam.Values, + jacobians: Optional[List[np.ndarray]]) -> float: """Landmark Factor error function :param measurement: Landmark measurement, to be filled with `partial` :param this: gtsam.CustomFactor handle @@ -165,15 +87,120 @@ def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobian return error -factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[0]], partial(error_lm, np.array([lm_0 + z_0])))) -factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[3]], partial(error_lm, np.array([lm_3 + z_3])))) +def main(): + """Main runner.""" -params = gtsam.GaussNewtonParams() -optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) + x = simulate_car() + print(f"Simulated car trajectory: {x}") -result = optimizer.optimize() + add_noise = True # set this to False to run with "perfect" measurements -error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)]) + # GPS measurements + sigma_gps = 3.0 # assume GPS is +/- 3m + g = [ + x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0) + for k in range(5) + ] -print("Result with GPS+Odometry+Landmark") -print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}") + # Odometry measurements + sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz + o = [ + x[k + 1] - x[k] + + (np.random.normal(scale=sigma_odo) if add_noise else 0) + for k in range(4) + ] + + # Landmark measurements: + sigma_lm = 1 # assume landmark measurement is accurate up to 1m + + # Assume first landmark is at x=5, we measure it at time k=0 + lm_0 = 5.0 + z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0) + + # Assume other landmark is at x=28, we measure it at time k=3 + lm_3 = 28.0 + z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0) + + unknown = [gtsam.symbol('x', k) for k in range(5)] + + print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown))) + + # We now can use nonlinear factor graphs + factor_graph = gtsam.NonlinearFactorGraph() + + # Add factors for GPS measurements + gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps) + + # Add the GPS factors + for k in range(5): + gf = gtsam.CustomFactor(gps_model, [unknown[k]], + partial(error_gps, np.array([g[k]]))) + factor_graph.add(gf) + + # New Values container + v = gtsam.Values() + + # Add initial estimates to the Values container + for i in range(5): + v.insert(unknown[i], np.array([0.0])) + + # Initialize optimizer + params = gtsam.GaussNewtonParams() + optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) + + # Optimize the factor graph + result = optimizer.optimize() + + # calculate the error from ground truth + error = 
np.array([(result.atVector(unknown[k]) - x[k])[0] + for k in range(5)]) + + print("Result with only GPS") + print(result, np.round(error, 2), + f"\nJ(X)={0.5 * np.sum(np.square(error))}") + + # Adding odometry will improve things a lot + odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo) + + for k in range(4): + odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]], + partial(error_odom, np.array([o[k]]))) + factor_graph.add(odof) + + params = gtsam.GaussNewtonParams() + optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) + + result = optimizer.optimize() + + error = np.array([(result.atVector(unknown[k]) - x[k])[0] + for k in range(5)]) + + print("Result with GPS+Odometry") + print(result, np.round(error, 2), + f"\nJ(X)={0.5 * np.sum(np.square(error))}") + + # This is great, but GPS noise is still apparent, so now we add the two landmarks + lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm) + + factor_graph.add( + gtsam.CustomFactor(lm_model, [unknown[0]], + partial(error_lm, np.array([lm_0 + z_0])))) + factor_graph.add( + gtsam.CustomFactor(lm_model, [unknown[3]], + partial(error_lm, np.array([lm_3 + z_3])))) + + params = gtsam.GaussNewtonParams() + optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) + + result = optimizer.optimize() + + error = np.array([(result.atVector(unknown[k]) - x[k])[0] + for k in range(5)]) + + print("Result with GPS+Odometry+Landmark") + print(result, np.round(error, 2), + f"\nJ(X)={0.5 * np.sum(np.square(error))}") + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/GPSFactorExample.py b/python/gtsam/examples/GPSFactorExample.py index 0bc0d1bf3..8eb663cb4 100644 --- a/python/gtsam/examples/GPSFactorExample.py +++ b/python/gtsam/examples/GPSFactorExample.py @@ -13,13 +13,8 @@ Author: Mandy Xie from __future__ import print_function -import numpy as np - import gtsam -import matplotlib.pyplot as plt -import gtsam.utils.plot as gtsam_plot - # ENU Origin is where the plane was in hold next to runway lat0 = 33.86998 lon0 = -84.30626 @@ -29,28 +24,34 @@ h0 = 274 GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25) -# Create an empty nonlinear factor graph -graph = gtsam.NonlinearFactorGraph() -# Add a prior on the first point, setting it to the origin -# A prior factor consists of a mean and a noise model (covariance matrix) -priorMean = gtsam.Pose3() # prior at origin -graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE)) +def main(): + """Main runner.""" + # Create an empty nonlinear factor graph + graph = gtsam.NonlinearFactorGraph() -# Add GPS factors -gps = gtsam.Point3(lat0, lon0, h0) -graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE)) -print("\nFactor Graph:\n{}".format(graph)) + # Add a prior on the first point, setting it to the origin + # A prior factor consists of a mean and a noise model (covariance matrix) + priorMean = gtsam.Pose3() # prior at origin + graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE)) -# Create the data structure to hold the initialEstimate estimate to the solution -# For illustrative purposes, these have been deliberately set to incorrect values -initial = gtsam.Values() -initial.insert(1, gtsam.Pose3()) -print("\nInitial Estimate:\n{}".format(initial)) + # Add GPS factors + gps = gtsam.Point3(lat0, lon0, h0) + graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE)) + print("\nFactor Graph:\n{}".format(graph)) -# optimize using Levenberg-Marquardt optimization -params = gtsam.LevenbergMarquardtParams() 
-optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) -result = optimizer.optimize() -print("\nFinal Result:\n{}".format(result)) + # Create the data structure to hold the initialEstimate estimate to the solution + # For illustrative purposes, these have been deliberately set to incorrect values + initial = gtsam.Values() + initial.insert(1, gtsam.Pose3()) + print("\nInitial Estimate:\n{}".format(initial)) + # optimize using Levenberg-Marquardt optimization + params = gtsam.LevenbergMarquardtParams() + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = optimizer.optimize() + print("\nFinal Result:\n{}".format(result)) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/IMUKittiExampleGPS.py b/python/gtsam/examples/IMUKittiExampleGPS.py new file mode 100644 index 000000000..8b6b4fdf0 --- /dev/null +++ b/python/gtsam/examples/IMUKittiExampleGPS.py @@ -0,0 +1,366 @@ +""" +Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE + +Author: Varun Agrawal +""" +import argparse +from typing import List, Tuple + +import gtsam +import numpy as np +from gtsam import ISAM2, Pose3, noiseModel +from gtsam.symbol_shorthand import B, V, X + +GRAVITY = 9.8 + + +class KittiCalibration: + """Class to hold KITTI calibration info.""" + def __init__(self, body_ptx: float, body_pty: float, body_ptz: float, + body_prx: float, body_pry: float, body_prz: float, + accelerometer_sigma: float, gyroscope_sigma: float, + integration_sigma: float, accelerometer_bias_sigma: float, + gyroscope_bias_sigma: float, average_delta_t: float): + self.bodyTimu = Pose3(gtsam.Rot3.RzRyRx(body_prx, body_pry, body_prz), + gtsam.Point3(body_ptx, body_pty, body_ptz)) + self.accelerometer_sigma = accelerometer_sigma + self.gyroscope_sigma = gyroscope_sigma + self.integration_sigma = integration_sigma + self.accelerometer_bias_sigma = accelerometer_bias_sigma + self.gyroscope_bias_sigma = gyroscope_bias_sigma + self.average_delta_t = average_delta_t + + +class ImuMeasurement: + """An instance of an IMU measurement.""" + def __init__(self, time: float, dt: float, accelerometer: gtsam.Point3, + gyroscope: gtsam.Point3): + self.time = time + self.dt = dt + self.accelerometer = accelerometer + self.gyroscope = gyroscope + + +class GpsMeasurement: + """An instance of a GPS measurement.""" + def __init__(self, time: float, position: gtsam.Point3): + self.time = time + self.position = position + + +def loadImuData(imu_data_file: str) -> List[ImuMeasurement]: + """Helper to load the IMU data.""" + # Read IMU data + # Time dt accelX accelY accelZ omegaX omegaY omegaZ + imu_data_file = gtsam.findExampleDataFile(imu_data_file) + imu_measurements = [] + + print("-- Reading IMU measurements from file") + with open(imu_data_file, encoding='UTF-8') as imu_data: + data = imu_data.readlines() + for i in range(1, len(data)): # ignore the first line + time, dt, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z = map( + float, data[i].split(' ')) + imu_measurement = ImuMeasurement( + time, dt, np.asarray([acc_x, acc_y, acc_z]), + np.asarray([gyro_x, gyro_y, gyro_z])) + imu_measurements.append(imu_measurement) + + return imu_measurements + + +def loadGpsData(gps_data_file: str) -> List[GpsMeasurement]: + """Helper to load the GPS data.""" + # Read GPS data + # Time,X,Y,Z + gps_data_file = gtsam.findExampleDataFile(gps_data_file) + gps_measurements = [] + + print("-- Reading GPS measurements from file") + with open(gps_data_file, encoding='UTF-8') as gps_data: 
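+        # the first line of the file is a header (Time,X,Y,Z), so parsing
+        # below starts at index 1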
+ data = gps_data.readlines() + for i in range(1, len(data)): + time, x, y, z = map(float, data[i].split(',')) + gps_measurement = GpsMeasurement(time, np.asarray([x, y, z])) + gps_measurements.append(gps_measurement) + + return gps_measurements + + +def loadKittiData( + imu_data_file: str = "KittiEquivBiasedImu.txt", + gps_data_file: str = "KittiGps_converted.txt", + imu_metadata_file: str = "KittiEquivBiasedImu_metadata.txt" +) -> Tuple[KittiCalibration, List[ImuMeasurement], List[GpsMeasurement]]: + """ + Load the KITTI Dataset. + """ + # Read IMU metadata and compute relative sensor pose transforms + # BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma + # GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma + # AverageDeltaT + imu_metadata_file = gtsam.findExampleDataFile(imu_metadata_file) + with open(imu_metadata_file, encoding='UTF-8') as imu_metadata: + print("-- Reading sensor metadata") + line = imu_metadata.readline() # Ignore the first line + line = imu_metadata.readline().strip() + data = list(map(float, line.split(' '))) + kitti_calibration = KittiCalibration(*data) + print("IMU metadata:", data) + + imu_measurements = loadImuData(imu_data_file) + gps_measurements = loadGpsData(gps_data_file) + + return kitti_calibration, imu_measurements, gps_measurements + + +def getImuParams(kitti_calibration: KittiCalibration): + """Get the IMU parameters from the KITTI calibration data.""" + w_coriolis = np.zeros(3) + + # Set IMU preintegration parameters + measured_acc_cov = np.eye(3) * np.power( + kitti_calibration.accelerometer_sigma, 2) + measured_omega_cov = np.eye(3) * np.power( + kitti_calibration.gyroscope_sigma, 2) + # error committed in integrating position from velocities + integration_error_cov = np.eye(3) * np.power( + kitti_calibration.integration_sigma, 2) + + imu_params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY) + # acc white noise in continuous + imu_params.setAccelerometerCovariance(measured_acc_cov) + # integration uncertainty continuous + imu_params.setIntegrationCovariance(integration_error_cov) + # gyro white noise in continuous + imu_params.setGyroscopeCovariance(measured_omega_cov) + imu_params.setOmegaCoriolis(w_coriolis) + + return imu_params + + +def save_results(isam: gtsam.ISAM2, output_filename: str, first_gps_pose: int, + gps_measurements: List[GpsMeasurement]): + """Write the results from `isam` to `output_filename`.""" + # Save results to file + print("Writing results to file...") + with open(output_filename, 'w', encoding='UTF-8') as fp_out: + fp_out.write( + "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n") + + result = isam.calculateEstimate() + for i in range(first_gps_pose, len(gps_measurements)): + pose_key = X(i) + vel_key = V(i) + bias_key = B(i) + + pose = result.atPose3(pose_key) + velocity = result.atVector(vel_key) + bias = result.atConstantBias(bias_key) + + pose_quat = pose.rotation().toQuaternion() + gps = gps_measurements[i].position + + print(f"State at #{i}") + print(f"Pose:\n{pose}") + print(f"Velocity:\n{velocity}") + print(f"Bias:\n{bias}") + + fp_out.write("{},{},{},{},{},{},{},{},{},{},{}\n".format( + gps_measurements[i].time, pose.x(), pose.y(), pose.z(), + pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), + gps[0], gps[1], gps[2])) + + +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument("--output_filename", + default="IMUKittiExampleGPSResults.csv") + return parser.parse_args() 
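+
+# optimize() below builds the graph incrementally: for each GPS timestamp it
+# preintegrates the IMU samples received since the previous timestamp, adds an
+# ImuFactor and a constant-bias BetweenFactor, attaches a GPS pose prior every
+# `gps_skip` poses, and only starts calling isam.update() once 2 * gps_skip
+# poses have been accumulated so that the heading becomes observable.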
+ + +def optimize(gps_measurements: List[GpsMeasurement], + imu_measurements: List[ImuMeasurement], + sigma_init_x: gtsam.noiseModel.Diagonal, + sigma_init_v: gtsam.noiseModel.Diagonal, + sigma_init_b: gtsam.noiseModel.Diagonal, + noise_model_gps: gtsam.noiseModel.Diagonal, + kitti_calibration: KittiCalibration, first_gps_pose: int, + gps_skip: int) -> gtsam.ISAM2: + """Run ISAM2 optimization on the measurements.""" + # Set initial conditions for the estimated trajectory + # initial pose is the reference frame (navigation frame) + current_pose_global = Pose3(gtsam.Rot3(), + gps_measurements[first_gps_pose].position) + + # the vehicle is stationary at the beginning at position 0,0,0 + current_velocity_global = np.zeros(3) + current_bias = gtsam.imuBias.ConstantBias() # init with zero bias + + imu_params = getImuParams(kitti_calibration) + + # Set ISAM2 parameters and create ISAM2 solver object + isam_params = gtsam.ISAM2Params() + isam_params.setFactorization("CHOLESKY") + isam_params.setRelinearizeSkip(10) + + isam = gtsam.ISAM2(isam_params) + + # Create the factor graph and values object that will store new factors and + # values to add to the incremental graph + new_factors = gtsam.NonlinearFactorGraph() + # values storing the initial estimates of new nodes in the factor graph + new_values = gtsam.Values() + + # Main loop: + # (1) we read the measurements + # (2) we create the corresponding factors in the graph + # (3) we solve the graph to obtain and optimal estimate of robot trajectory + print("-- Starting main loop: inference is performed at each time step, " + "but we plot trajectory every 10 steps") + + j = 0 + included_imu_measurement_count = 0 + + for i in range(first_gps_pose, len(gps_measurements)): + # At each non=IMU measurement we initialize a new node in the graph + current_pose_key = X(i) + current_vel_key = V(i) + current_bias_key = B(i) + t = gps_measurements[i].time + + if i == first_gps_pose: + # Create initial estimate and prior on initial pose, velocity, and biases + new_values.insert(current_pose_key, current_pose_global) + new_values.insert(current_vel_key, current_velocity_global) + new_values.insert(current_bias_key, current_bias) + + new_factors.addPriorPose3(current_pose_key, current_pose_global, + sigma_init_x) + new_factors.addPriorVector(current_vel_key, + current_velocity_global, sigma_init_v) + new_factors.addPriorConstantBias(current_bias_key, current_bias, + sigma_init_b) + else: + t_previous = gps_measurements[i - 1].time + + # Summarize IMU data between the previous GPS measurement and now + current_summarized_measurement = gtsam.PreintegratedImuMeasurements( + imu_params, current_bias) + + while (j < len(imu_measurements) + and imu_measurements[j].time <= t): + if imu_measurements[j].time >= t_previous: + current_summarized_measurement.integrateMeasurement( + imu_measurements[j].accelerometer, + imu_measurements[j].gyroscope, imu_measurements[j].dt) + included_imu_measurement_count += 1 + j += 1 + + # Create IMU factor + previous_pose_key = X(i - 1) + previous_vel_key = V(i - 1) + previous_bias_key = B(i - 1) + + new_factors.push_back( + gtsam.ImuFactor(previous_pose_key, previous_vel_key, + current_pose_key, current_vel_key, + previous_bias_key, + current_summarized_measurement)) + + # Bias evolution as given in the IMU metadata + sigma_between_b = gtsam.noiseModel.Diagonal.Sigmas( + np.asarray([ + np.sqrt(included_imu_measurement_count) * + kitti_calibration.accelerometer_bias_sigma + ] * 3 + [ + np.sqrt(included_imu_measurement_count) * + 
kitti_calibration.gyroscope_bias_sigma + ] * 3)) + + new_factors.push_back( + gtsam.BetweenFactorConstantBias(previous_bias_key, + current_bias_key, + gtsam.imuBias.ConstantBias(), + sigma_between_b)) + + # Create GPS factor + gps_pose = Pose3(current_pose_global.rotation(), + gps_measurements[i].position) + if (i % gps_skip) == 0: + new_factors.addPriorPose3(current_pose_key, gps_pose, + noise_model_gps) + new_values.insert(current_pose_key, gps_pose) + + print(f"############ POSE INCLUDED AT TIME {t} ############") + print(gps_pose.translation(), "\n") + else: + new_values.insert(current_pose_key, current_pose_global) + + # Add initial values for velocity and bias based on the previous + # estimates + new_values.insert(current_vel_key, current_velocity_global) + new_values.insert(current_bias_key, current_bias) + + # Update solver + # ======================================================================= + # We accumulate 2*GPSskip GPS measurements before updating the solver at + # first so that the heading becomes observable. + if i > (first_gps_pose + 2 * gps_skip): + print(f"############ NEW FACTORS AT TIME {t:.6f} ############") + new_factors.print() + + isam.update(new_factors, new_values) + + # Reset the newFactors and newValues list + new_factors.resize(0) + new_values.clear() + + # Extract the result/current estimates + result = isam.calculateEstimate() + + current_pose_global = result.atPose3(current_pose_key) + current_velocity_global = result.atVector(current_vel_key) + current_bias = result.atConstantBias(current_bias_key) + + print(f"############ POSE AT TIME {t} ############") + current_pose_global.print() + print("\n") + + return isam + + +def main(): + """Main runner.""" + args = parse_args() + kitti_calibration, imu_measurements, gps_measurements = loadKittiData() + + if not kitti_calibration.bodyTimu.equals(Pose3(), 1e-8): + raise ValueError( + "Currently only support IMUinBody is identity, i.e. IMU and body frame are the same" + ) + + # Configure different variables + first_gps_pose = 1 + gps_skip = 10 + + # Configure noise models + noise_model_gps = noiseModel.Diagonal.Precisions( + np.asarray([0, 0, 0] + [1.0 / 0.07] * 3)) + + sigma_init_x = noiseModel.Diagonal.Precisions( + np.asarray([0, 0, 0, 1, 1, 1])) + sigma_init_v = noiseModel.Diagonal.Sigmas(np.ones(3) * 1000.0) + sigma_init_b = noiseModel.Diagonal.Sigmas( + np.asarray([0.1] * 3 + [5.00e-05] * 3)) + + isam = optimize(gps_measurements, imu_measurements, sigma_init_x, + sigma_init_v, sigma_init_b, noise_model_gps, + kitti_calibration, first_gps_pose, gps_skip) + + save_results(isam, args.output_filename, first_gps_pose, gps_measurements) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py index 86613234d..c86a4e216 100644 --- a/python/gtsam/examples/ImuFactorExample.py +++ b/python/gtsam/examples/ImuFactorExample.py @@ -10,31 +10,51 @@ A script validating and demonstrating the ImuFactor inference. 
Author: Frank Dellaert, Varun Agrawal """ -# pylint: disable=no-name-in-module,unused-import,arguments-differ +# pylint: disable=no-name-in-module,unused-import,arguments-differ,import-error,wrong-import-order from __future__ import print_function import argparse import math +import gtsam import matplotlib.pyplot as plt import numpy as np -from mpl_toolkits.mplot3d import Axes3D - -import gtsam from gtsam.symbol_shorthand import B, V, X from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D from PreintegrationExample import POSES_FIG, PreintegrationExample BIAS_KEY = B(0) +GRAVITY = 9.81 np.set_printoptions(precision=3, suppress=True) +def parse_args() -> argparse.Namespace: + """Parse command line arguments.""" + parser = argparse.ArgumentParser("ImuFactorExample.py") + parser.add_argument("--twist_scenario", + default="sick_twist", + choices=("zero_twist", "forward_twist", "loop_twist", + "sick_twist")) + parser.add_argument("--time", + "-T", + default=12, + type=int, + help="Total navigation time in seconds") + parser.add_argument("--compute_covariances", + default=False, + action='store_true') + parser.add_argument("--verbose", default=False, action='store_true') + args = parser.parse_args() + return args + + class ImuFactorExample(PreintegrationExample): """Class to run example of the Imu Factor.""" - def __init__(self, twist_scenario="sick_twist"): + def __init__(self, twist_scenario: str = "sick_twist"): self.velocity = np.array([2, 0, 0]) self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) @@ -51,19 +71,30 @@ class ImuFactorExample(PreintegrationExample): gyroBias = np.array([0.1, 0.3, -0.1]) bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) + params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY) + + # Some arbitrary noise sigmas + gyro_sigma = 1e-3 + accel_sigma = 1e-3 + I_3x3 = np.eye(3) + params.setGyroscopeCovariance(gyro_sigma**2 * I_3x3) + params.setAccelerometerCovariance(accel_sigma**2 * I_3x3) + params.setIntegrationCovariance(1e-7**2 * I_3x3) + dt = 1e-2 super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], - bias, dt) + bias, params, dt) - def addPrior(self, i, graph): - """Add priors at time step `i`.""" + def addPrior(self, i: int, graph: gtsam.NonlinearFactorGraph): + """Add a prior on the navigation state at time `i`.""" state = self.scenario.navState(i) graph.push_back( gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise)) graph.push_back( gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise)) - def optimize(self, graph, initial): + def optimize(self, graph: gtsam.NonlinearFactorGraph, + initial: gtsam.Values): """Optimize using Levenberg-Marquardt optimization.""" params = gtsam.LevenbergMarquardtParams() params.setVerbosityLM("SUMMARY") @@ -71,24 +102,49 @@ class ImuFactorExample(PreintegrationExample): result = optimizer.optimize() return result - def plot(self, result): - """Plot resulting poses.""" + def plot(self, + values: gtsam.Values, + title: str = "Estimated Trajectory", + fignum: int = POSES_FIG + 1, + show: bool = False): + """ + Plot poses in values. + + Args: + values: The values object with the poses to plot. + title: The title of the plot. + fignum: The matplotlib figure number. + POSES_FIG is a value from the PreintegrationExample which we simply increment to generate a new figure. + show: Flag indicating whether to display the figure. 
+ """ i = 0 - while result.exists(X(i)): - pose_i = result.atPose3(X(i)) - plot_pose3(POSES_FIG + 1, pose_i, 1) + while values.exists(X(i)): + pose_i = values.atPose3(X(i)) + plot_pose3(fignum, pose_i, 1) i += 1 - plt.title("Estimated Trajectory") + plt.title(title) - gtsam.utils.plot.set_axes_equal(POSES_FIG + 1) + gtsam.utils.plot.set_axes_equal(fignum) - print("Bias Values", result.atConstantBias(BIAS_KEY)) + print("Bias Values", values.atConstantBias(BIAS_KEY)) plt.ioff() - plt.show() - def run(self, T=12, compute_covariances=False, verbose=True): - """Main runner.""" + if show: + plt.show() + + def run(self, + T: int = 12, + compute_covariances: bool = False, + verbose: bool = True): + """ + Main runner. + + Args: + T: Total trajectory time. + compute_covariances: Flag indicating whether to compute marginal covariances. + verbose: Flag indicating if printing should be verbose. + """ graph = gtsam.NonlinearFactorGraph() # initialize data structure for pre-integrated IMU measurements @@ -173,25 +229,11 @@ class ImuFactorExample(PreintegrationExample): print("Covariance on vel {}:\n{}\n".format( i, marginals.marginalCovariance(V(i)))) - self.plot(result) + self.plot(result, show=True) if __name__ == '__main__': - parser = argparse.ArgumentParser("ImuFactorExample.py") - parser.add_argument("--twist_scenario", - default="sick_twist", - choices=("zero_twist", "forward_twist", "loop_twist", - "sick_twist")) - parser.add_argument("--time", - "-T", - default=12, - type=int, - help="Total time in seconds") - parser.add_argument("--compute_covariances", - default=False, - action='store_true') - parser.add_argument("--verbose", default=False, action='store_true') - args = parser.parse_args() + args = parse_args() ImuFactorExample(args.twist_scenario).run(args.time, args.compute_covariances, diff --git a/python/gtsam/examples/OdometryExample.py b/python/gtsam/examples/OdometryExample.py index 8b519ce9a..210aeb808 100644 --- a/python/gtsam/examples/OdometryExample.py +++ b/python/gtsam/examples/OdometryExample.py @@ -13,57 +13,60 @@ Author: Frank Dellaert from __future__ import print_function -import numpy as np - import gtsam - -import matplotlib.pyplot as plt import gtsam.utils.plot as gtsam_plot +import matplotlib.pyplot as plt +import numpy as np # Create noise models ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) -# Create an empty nonlinear factor graph -graph = gtsam.NonlinearFactorGraph() -# Add a prior on the first pose, setting it to the origin -# A prior factor consists of a mean and a noise model (covariance matrix) -priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin -graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE)) +def main(): + """Main runner""" + # Create an empty nonlinear factor graph + graph = gtsam.NonlinearFactorGraph() -# Add odometry factors -odometry = gtsam.Pose2(2.0, 0.0, 0.0) -# For simplicity, we will use the same noise model for each odometry factor -# Create odometry (Between) factors between consecutive poses -graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE)) -graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE)) -print("\nFactor Graph:\n{}".format(graph)) + # Add a prior on the first pose, setting it to the origin + # A prior factor consists of a mean and a noise model (covariance matrix) + priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin + graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE)) -# 
Create the data structure to hold the initialEstimate estimate to the solution -# For illustrative purposes, these have been deliberately set to incorrect values -initial = gtsam.Values() -initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) -initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) -initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1)) -print("\nInitial Estimate:\n{}".format(initial)) + # Add odometry factors + odometry = gtsam.Pose2(2.0, 0.0, 0.0) + # For simplicity, we will use the same noise model for each odometry factor + # Create odometry (Between) factors between consecutive poses + graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE)) + graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE)) + print("\nFactor Graph:\n{}".format(graph)) -# optimize using Levenberg-Marquardt optimization -params = gtsam.LevenbergMarquardtParams() -optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) -result = optimizer.optimize() -print("\nFinal Result:\n{}".format(result)) + # Create the data structure to hold the initialEstimate estimate to the solution + # For illustrative purposes, these have been deliberately set to incorrect values + initial = gtsam.Values() + initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) + initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) + initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1)) + print("\nInitial Estimate:\n{}".format(initial)) -# 5. Calculate and print marginal covariances for all variables -marginals = gtsam.Marginals(graph, result) -for i in range(1, 4): - print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i))) - -fig = plt.figure(0) -for i in range(1, 4): - gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i)) -plt.axis('equal') -plt.show() + # optimize using Levenberg-Marquardt optimization + params = gtsam.LevenbergMarquardtParams() + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = optimizer.optimize() + print("\nFinal Result:\n{}".format(result)) + # 5. Calculate and print marginal covariances for all variables + marginals = gtsam.Marginals(graph, result) + for i in range(1, 4): + print("X{} covariance:\n{}\n".format(i, + marginals.marginalCovariance(i))) + + for i in range(1, 4): + gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, + marginals.marginalCovariance(i)) + plt.axis('equal') + plt.show() +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py index 5ffdf048d..d2ee92c95 100644 --- a/python/gtsam/examples/PlanarSLAMExample.py +++ b/python/gtsam/examples/PlanarSLAMExample.py @@ -13,69 +13,85 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) from __future__ import print_function -import numpy as np - import gtsam -from gtsam.symbol_shorthand import X, L +import numpy as np +from gtsam.symbol_shorthand import L, X # Create noise models PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) -# Create an empty nonlinear factor graph -graph = gtsam.NonlinearFactorGraph() -# Create the keys corresponding to unknown variables in the factor graph -X1 = X(1) -X2 = X(2) -X3 = X(3) -L1 = L(4) -L2 = L(5) +def main(): + """Main runner""" -# Add a prior on pose X1 at the origin. 
A prior factor consists of a mean and a noise model -graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) + # Create an empty nonlinear factor graph + graph = gtsam.NonlinearFactorGraph() -# Add odometry factors between X1,X2 and X2,X3, respectively -graph.add(gtsam.BetweenFactorPose2( - X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) -graph.add(gtsam.BetweenFactorPose2( - X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) + # Create the keys corresponding to unknown variables in the factor graph + X1 = X(1) + X2 = X(2) + X3 = X(3) + L1 = L(4) + L2 = L(5) -# Add Range-Bearing measurements to two different landmarks L1 and L2 -graph.add(gtsam.BearingRangeFactor2D( - X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE)) -graph.add(gtsam.BearingRangeFactor2D( - X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE)) -graph.add(gtsam.BearingRangeFactor2D( - X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE)) + # Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model + graph.add( + gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) -# Print graph -print("Factor Graph:\n{}".format(graph)) + # Add odometry factors between X1,X2 and X2,X3, respectively + graph.add( + gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), + ODOMETRY_NOISE)) + graph.add( + gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), + ODOMETRY_NOISE)) -# Create (deliberately inaccurate) initial estimate -initial_estimate = gtsam.Values() -initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15)) -initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20)) -initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10)) -initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10)) -initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80)) + # Add Range-Bearing measurements to two different landmarks L1 and L2 + graph.add( + gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45), + np.sqrt(4.0 + 4.0), MEASUREMENT_NOISE)) + graph.add( + gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, + MEASUREMENT_NOISE)) + graph.add( + gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, + MEASUREMENT_NOISE)) -# Print -print("Initial Estimate:\n{}".format(initial_estimate)) + # Print graph + print("Factor Graph:\n{}".format(graph)) -# Optimize using Levenberg-Marquardt optimization. The optimizer -# accepts an optional set of configuration parameters, controlling -# things like convergence criteria, the type of linear system solver -# to use, and the amount of information displayed during optimization. -# Here we will use the default set of parameters. See the -# documentation for the full set of parameters. 
-params = gtsam.LevenbergMarquardtParams() -optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params) -result = optimizer.optimize() -print("\nFinal Result:\n{}".format(result)) + # Create (deliberately inaccurate) initial estimate + initial_estimate = gtsam.Values() + initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15)) + initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20)) + initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10)) + initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10)) + initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80)) -# Calculate and print marginal covariances for all variables -marginals = gtsam.Marginals(graph, result) -for (key, str) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]: - print("{} covariance:\n{}\n".format(str, marginals.marginalCovariance(key))) + # Print + print("Initial Estimate:\n{}".format(initial_estimate)) + + # Optimize using Levenberg-Marquardt optimization. The optimizer + # accepts an optional set of configuration parameters, controlling + # things like convergence criteria, the type of linear system solver + # to use, and the amount of information displayed during optimization. + # Here we will use the default set of parameters. See the + # documentation for the full set of parameters. + params = gtsam.LevenbergMarquardtParams() + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, + params) + result = optimizer.optimize() + print("\nFinal Result:\n{}".format(result)) + + # Calculate and print marginal covariances for all variables + marginals = gtsam.Marginals(graph, result) + for (key, s) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), + (L2, "L2")]: + print("{} covariance:\n{}\n".format(s, + marginals.marginalCovariance(key))) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/Pose2ISAM2Example.py b/python/gtsam/examples/Pose2ISAM2Example.py new file mode 100644 index 000000000..c70dbfa6f --- /dev/null +++ b/python/gtsam/examples/Pose2ISAM2Example.py @@ -0,0 +1,178 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Pose SLAM example using iSAM2 in the 2D plane. +Author: Jerred Chen, Yusuf Ali +Modeled after: + - VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python) + - Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) +""" + +import math + +import matplotlib.pyplot as plt +import numpy as np + +import gtsam +import gtsam.utils.plot as gtsam_plot + +def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values, + key: int): + """Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2.""" + + # Print the current estimates computed using iSAM2. + print("*"*50 + f"\nInference after State {key+1}:\n") + print(current_estimate) + + # Compute the marginals for all states in the graph. + marginals = gtsam.Marginals(graph, current_estimate) + + # Plot the newly updated iSAM2 inference. 
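+    # (figure 0 is reused and cleared on every call so the plot updates in place)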
+ fig = plt.figure(0) + axes = fig.gca() + plt.cla() + + i = 1 + while current_estimate.exists(i): + gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i)) + i += 1 + + plt.axis('equal') + axes.set_xlim(-1, 5) + axes.set_ylim(-1, 3) + plt.pause(1) + +def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values, + key: int, xy_tol=0.6, theta_tol=17) -> int: + """Simple brute force approach which iterates through previous states + and checks for loop closure. + + Args: + odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame. + current_estimate: The current estimates computed by iSAM2. + key: Key corresponding to the current state estimate of the robot. + xy_tol: Optional argument for the x-y measurement tolerance, in meters. + theta_tol: Optional argument for the theta measurement tolerance, in degrees. + Returns: + k: The key of the state which is helping add the loop closure constraint. + If loop closure is not found, then None is returned. + """ + if current_estimate: + prev_est = current_estimate.atPose2(key+1) + rotated_odom = prev_est.rotation().matrix() @ odom[:2] + curr_xy = np.array([prev_est.x() + rotated_odom[0], + prev_est.y() + rotated_odom[1]]) + curr_theta = prev_est.theta() + odom[2] + for k in range(1, key+1): + pose_xy = np.array([current_estimate.atPose2(k).x(), + current_estimate.atPose2(k).y()]) + pose_theta = current_estimate.atPose2(k).theta() + if (abs(pose_xy - curr_xy) <= xy_tol).all() and \ + (abs(pose_theta - curr_theta) <= theta_tol*np.pi/180): + return k + +def Pose2SLAM_ISAM2_example(): + """Perform 2D SLAM given the ground truth changes in pose as well as + simple loop closure detection.""" + plt.ion() + + # Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters. + prior_xy_sigma = 0.3 + + # Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees. + prior_theta_sigma = 5 + + # Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters. + odometry_xy_sigma = 0.2 + + # Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees. + odometry_theta_sigma = 5 + + # Although this example only uses linear measurements and Gaussian noise models, it is important + # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example + # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem. + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma, + prior_xy_sigma, + prior_theta_sigma*np.pi/180])) + ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma, + odometry_xy_sigma, + odometry_theta_sigma*np.pi/180])) + + # Create a Nonlinear factor graph as well as the data structure to hold state estimates. + graph = gtsam.NonlinearFactorGraph() + initial_estimate = gtsam.Values() + + # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many + # update calls are required to perform the relinearization. + parameters = gtsam.ISAM2Params() + parameters.setRelinearizeThreshold(0.1) + parameters.setRelinearizeSkip(1) + isam = gtsam.ISAM2(parameters) + + # Create the ground truth odometry measurements of the robot during the trajectory. 
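# Each ground-truth odometry tuple below is a body-frame increment (dx, dy, dtheta);
# chaining such increments with Pose2.compose traces out the trajectory. A tiny check
# using the first two increments of this example:
import math
import gtsam

pose = gtsam.Pose2(0, 0, 0)
for dx, dy, dtheta in [(2, 0, 0), (2, 0, math.pi / 2)]:
    pose = pose.compose(gtsam.Pose2(dx, dy, dtheta))
print(pose)  # x ~ 4, y ~ 0, theta ~ pi/2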
+    true_odometry = [(2, 0, 0),
+                     (2, 0, math.pi/2),
+                     (2, 0, math.pi/2),
+                     (2, 0, math.pi/2),
+                     (2, 0, math.pi/2)]
+
+    # Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
+    odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
+                             for true_odom in true_odometry]
+
+    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
+    # iSAM2 incremental optimization.
+    graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
+    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
+
+    # Initialize the current estimate which is used during the incremental inference loop.
+    current_estimate = initial_estimate
+
+    for i in range(len(true_odometry)):
+
+        # Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
+        noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]
+
+        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
+        loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)
+
+        # Add a binary factor in between two existing states if loop closure is detected.
+        # Otherwise, add a binary factor between a newly observed state and the previous state.
+        if loop:
+            graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
+                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
+        else:
+            graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
+                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
+
+        # Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
+        computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
+                                                                                noisy_odom_y,
+                                                                                noisy_odom_theta))
+        initial_estimate.insert(i + 2, computed_estimate)
+
+        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
+        isam.update(graph, initial_estimate)
+        current_estimate = isam.calculateEstimate()
+
+        # Report all current state estimates from the iSAM2 optimization.
+        report_on_progress(graph, current_estimate, i)
+        initial_estimate.clear()
+
+    # Print the final covariance matrix for each pose after completing inference on the trajectory.
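# The marginal covariance printed below is a 3x3 block ordered (x, y, theta); the square
# roots of its diagonal are the 1-sigma uncertainties of each pose estimate. A tiny
# self-contained illustration with a single prior factor:
import numpy as np
import gtsam

g = gtsam.NonlinearFactorGraph()
model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
g.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), model))
v = gtsam.Values()
v.insert(1, gtsam.Pose2(0, 0, 0))
cov = gtsam.Marginals(g, v).marginalCovariance(1)
print(np.sqrt(np.diag(cov)))  # ~[0.3, 0.3, 0.1]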
+ marginals = gtsam.Marginals(graph, current_estimate) + i = 1 + for i in range(1, len(true_odometry)+1): + print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n") + + plt.ioff() + plt.show() + + +if __name__ == "__main__": + Pose2SLAM_ISAM2_example() diff --git a/python/gtsam/examples/Pose2SLAMExample.py b/python/gtsam/examples/Pose2SLAMExample.py index 2ec190d73..300a70fbd 100644 --- a/python/gtsam/examples/Pose2SLAMExample.py +++ b/python/gtsam/examples/Pose2SLAMExample.py @@ -15,82 +15,88 @@ from __future__ import print_function import math -import numpy as np - import gtsam - -import matplotlib.pyplot as plt import gtsam.utils.plot as gtsam_plot +import matplotlib.pyplot as plt -def vector3(x, y, z): - """Create 3d double numpy array.""" - return np.array([x, y, z], dtype=float) +def main(): + """Main runner.""" + # Create noise models + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(gtsam.Point3(0.3, 0.3, 0.1)) + ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas( + gtsam.Point3(0.2, 0.2, 0.1)) -# Create noise models -PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) -ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) + # 1. Create a factor graph container and add factors to it + graph = gtsam.NonlinearFactorGraph() -# 1. Create a factor graph container and add factors to it -graph = gtsam.NonlinearFactorGraph() + # 2a. Add a prior on the first pose, setting it to the origin + # A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix) + graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE)) -# 2a. Add a prior on the first pose, setting it to the origin -# A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix) -graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE)) + # 2b. Add odometry factors + # Create odometry (Between) factors between consecutive poses + graph.add( + gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE)) + graph.add( + gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2), + ODOMETRY_NOISE)) + graph.add( + gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2), + ODOMETRY_NOISE)) + graph.add( + gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2), + ODOMETRY_NOISE)) -# 2b. Add odometry factors -# Create odometry (Between) factors between consecutive poses -graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE)) -graph.add(gtsam.BetweenFactorPose2( - 2, 3, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) -graph.add(gtsam.BetweenFactorPose2( - 3, 4, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) -graph.add(gtsam.BetweenFactorPose2( - 4, 5, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) + # 2c. Add the loop closure constraint + # This factor encodes the fact that we have returned to the same pose. In real + # systems, these constraints may be identified in many ways, such as appearance-based + # techniques with camera images. We will use another Between Factor to enforce this constraint: + graph.add( + gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2), + ODOMETRY_NOISE)) + print("\nFactor Graph:\n{}".format(graph)) # print -# 2c. Add the loop closure constraint -# This factor encodes the fact that we have returned to the same pose. In real -# systems, these constraints may be identified in many ways, such as appearance-based -# techniques with camera images. 
We will use another Between Factor to enforce this constraint: -graph.add(gtsam.BetweenFactorPose2( - 5, 2, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) -print("\nFactor Graph:\n{}".format(graph)) # print + # 3. Create the data structure to hold the initial_estimate estimate to the + # solution. For illustrative purposes, these have been deliberately set to incorrect values + initial_estimate = gtsam.Values() + initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) + initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) + initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2)) + initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi)) + initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2)) + print("\nInitial Estimate:\n{}".format(initial_estimate)) # print -# 3. Create the data structure to hold the initial_estimate estimate to the -# solution. For illustrative purposes, these have been deliberately set to incorrect values -initial_estimate = gtsam.Values() -initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) -initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) -initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2)) -initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi)) -initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2)) -print("\nInitial Estimate:\n{}".format(initial_estimate)) # print + # 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer + # The optimizer accepts an optional set of configuration parameters, + # controlling things like convergence criteria, the type of linear + # system solver to use, and the amount of information displayed during + # optimization. We will set a few parameters as a demonstration. + parameters = gtsam.GaussNewtonParams() -# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer -# The optimizer accepts an optional set of configuration parameters, -# controlling things like convergence criteria, the type of linear -# system solver to use, and the amount of information displayed during -# optimization. We will set a few parameters as a demonstration. -parameters = gtsam.GaussNewtonParams() + # Stop iterating once the change in error between steps is less than this value + parameters.setRelativeErrorTol(1e-5) + # Do not perform more than N iteration steps + parameters.setMaxIterations(100) + # Create the optimizer ... + optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters) + # ... and optimize + result = optimizer.optimize() + print("Final Result:\n{}".format(result)) -# Stop iterating once the change in error between steps is less than this value -parameters.setRelativeErrorTol(1e-5) -# Do not perform more than N iteration steps -parameters.setMaxIterations(100) -# Create the optimizer ... -optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters) -# ... and optimize -result = optimizer.optimize() -print("Final Result:\n{}".format(result)) + # 5. Calculate and print marginal covariances for all variables + marginals = gtsam.Marginals(graph, result) + for i in range(1, 6): + print("X{} covariance:\n{}\n".format(i, + marginals.marginalCovariance(i))) -# 5. 
Calculate and print marginal covariances for all variables -marginals = gtsam.Marginals(graph, result) -for i in range(1, 6): - print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i))) + for i in range(1, 6): + gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, + marginals.marginalCovariance(i)) -fig = plt.figure(0) -for i in range(1, 6): - gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i)) + plt.axis('equal') + plt.show() -plt.axis('equal') -plt.show() + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/Pose2SLAMExample_g2o.py b/python/gtsam/examples/Pose2SLAMExample_g2o.py index 97fb46c5f..cf029c049 100644 --- a/python/gtsam/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose2SLAMExample_g2o.py @@ -12,77 +12,86 @@ and does the optimization. Output is written on a file, in g2o format # pylint: disable=invalid-name, E1101 from __future__ import print_function + import argparse -import math -import numpy as np -import matplotlib.pyplot as plt import gtsam +import matplotlib.pyplot as plt from gtsam.utils import plot -def vector3(x, y, z): - """Create 3d double numpy array.""" - return np.array([x, y, z], dtype=float) +def main(): + """Main runner.""" + + parser = argparse.ArgumentParser( + description="A 2D Pose SLAM example that reads input from g2o, " + "converts it to a factor graph and does the optimization. " + "Output is written on a file, in g2o format") + parser.add_argument('-i', '--input', help='input file g2o format') + parser.add_argument( + '-o', + '--output', + help="the path to the output file with optimized graph") + parser.add_argument('-m', + '--maxiter', + type=int, + help="maximum number of iterations for optimizer") + parser.add_argument('-k', + '--kernel', + choices=['none', 'huber', 'tukey'], + default="none", + help="Type of kernel used") + parser.add_argument("-p", + "--plot", + action="store_true", + help="Flag to plot results") + args = parser.parse_args() + + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\ + else args.input + + maxIterations = 100 if args.maxiter is None else args.maxiter + + is3D = False + + graph, initial = gtsam.readG2o(g2oFile, is3D) + + assert args.kernel == "none", "Supplied kernel type is not yet implemented" + + # Add prior on the pose having index (key) = 0 + priorModel = gtsam.noiseModel.Diagonal.Variances(gtsam.Point3(1e-6, 1e-6, 1e-8)) + graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) + + params = gtsam.GaussNewtonParams() + params.setVerbosity("Termination") + params.setMaxIterations(maxIterations) + # parameters.setRelativeErrorTol(1e-5) + # Create the optimizer ... + optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) + # ... 
and optimize + result = optimizer.optimize() + + print("Optimization complete") + print("initial error = ", graph.error(initial)) + print("final error = ", graph.error(result)) + + if args.output is None: + print("\nFactor Graph:\n{}".format(graph)) + print("\nInitial Estimate:\n{}".format(initial)) + print("Final Result:\n{}".format(result)) + else: + outputFile = args.output + print("Writing results to file: ", outputFile) + graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) + gtsam.writeG2o(graphNoKernel, result, outputFile) + print("Done!") + + if args.plot: + resultPoses = gtsam.utilities.extractPose2(result) + for i in range(resultPoses.shape[0]): + plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) + plt.show() -parser = argparse.ArgumentParser( - description="A 2D Pose SLAM example that reads input from g2o, " - "converts it to a factor graph and does the optimization. " - "Output is written on a file, in g2o format") -parser.add_argument('-i', '--input', help='input file g2o format') -parser.add_argument('-o', '--output', - help="the path to the output file with optimized graph") -parser.add_argument('-m', '--maxiter', type=int, - help="maximum number of iterations for optimizer") -parser.add_argument('-k', '--kernel', choices=['none', 'huber', 'tukey'], - default="none", help="Type of kernel used") -parser.add_argument("-p", "--plot", action="store_true", - help="Flag to plot results") -args = parser.parse_args() - -g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\ - else args.input - -maxIterations = 100 if args.maxiter is None else args.maxiter - -is3D = False - -graph, initial = gtsam.readG2o(g2oFile, is3D) - -assert args.kernel == "none", "Supplied kernel type is not yet implemented" - -# Add prior on the pose having index (key) = 0 -priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) -graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) - -params = gtsam.GaussNewtonParams() -params.setVerbosity("Termination") -params.setMaxIterations(maxIterations) -# parameters.setRelativeErrorTol(1e-5) -# Create the optimizer ... -optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) -# ... and optimize -result = optimizer.optimize() - -print("Optimization complete") -print("initial error = ", graph.error(initial)) -print("final error = ", graph.error(result)) - - -if args.output is None: - print("\nFactor Graph:\n{}".format(graph)) - print("\nInitial Estimate:\n{}".format(initial)) - print("Final Result:\n{}".format(result)) -else: - outputFile = args.output - print("Writing results to file: ", outputFile) - graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) - gtsam.writeG2o(graphNoKernel, result, outputFile) - print ("Done!") - -if args.plot: - resultPoses = gtsam.utilities.extractPose2(result) - for i in range(resultPoses.shape[0]): - plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) - plt.show() +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/Pose2SLAMExample_lago.py b/python/gtsam/examples/Pose2SLAMExample_lago.py new file mode 100644 index 000000000..d8cddde0b --- /dev/null +++ b/python/gtsam/examples/Pose2SLAMExample_lago.py @@ -0,0 +1,67 @@ +""" +GTSAM Copyright 2010, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) +See LICENSE for the license information + +A 2D Pose SLAM example that reads input from g2o, and solve the Pose2 problem +using LAGO (Linear Approximation for Graph Optimization). +Output is written to a file, in g2o format + +Reference: +L. Carlone, R. Aragues, J. Castellanos, and B. Bona, A fast and accurate +approximation for planar pose graph optimization, IJRR, 2014. + +L. Carlone, R. Aragues, J.A. Castellanos, and B. Bona, A linear approximation +for graph-based simultaneous localization and mapping, RSS, 2011. + +Author: Luca Carlone (C++), John Lambert (Python) +""" + +import argparse +from argparse import Namespace + +import numpy as np + +import gtsam +from gtsam import Point3, Pose2, PriorFactorPose2, Values + + +def run(args: Namespace) -> None: + """Run LAGO on input data stored in g2o file.""" + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None else args.input + + graph = gtsam.NonlinearFactorGraph() + graph, initial = gtsam.readG2o(g2oFile) + + # Add prior on the pose having index (key) = 0 + priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8)) + graph.add(PriorFactorPose2(0, Pose2(), priorModel)) + print(graph) + + print("Computing LAGO estimate") + estimateLago: Values = gtsam.lago.initialize(graph) + print("done!") + + if args.output is None: + estimateLago.print("estimateLago") + else: + outputFile = args.output + print("Writing results to file: ", outputFile) + graphNoKernel = gtsam.NonlinearFactorGraph() + graphNoKernel, initial2 = gtsam.readG2o(g2oFile) + gtsam.writeG2o(graphNoKernel, estimateLago, outputFile) + print("Done! ") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A 2D Pose SLAM example that reads input from g2o, " + "converts it to a factor graph and does the optimization. " + "Output is written on a file, in g2o format" + ) + parser.add_argument("-i", "--input", help="input file g2o format") + parser.add_argument("-o", "--output", help="the path to the output file with optimized graph") + args = parser.parse_args() + run(args) diff --git a/python/gtsam/examples/Pose3ISAM2Example.py b/python/gtsam/examples/Pose3ISAM2Example.py new file mode 100644 index 000000000..59b9fb2ac --- /dev/null +++ b/python/gtsam/examples/Pose3ISAM2Example.py @@ -0,0 +1,208 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Pose SLAM example using iSAM2 in 3D space. +Author: Jerred Chen +Modeled after: + - VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python) + - Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python) +""" + +from typing import List + +import matplotlib.pyplot as plt +import numpy as np + +import gtsam +import gtsam.utils.plot as gtsam_plot + +def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values, + key: int): + """Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2.""" + + # Print the current estimates computed using iSAM2. + print("*"*50 + f"\nInference after State {key+1}:\n") + print(current_estimate) + + # Compute the marginals for all states in the graph. + marginals = gtsam.Marginals(graph, current_estimate) + + # Plot the newly updated iSAM2 inference. 
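# Stand-alone sketch of the 3D plotting call used below: gtsam.utils.plot.plot_pose3
# draws the pose axes and accepts an optional 6x6 marginal covariance for the
# uncertainty ellipsoid. Figure number, axis length and covariance are placeholders.
import numpy as np
import matplotlib.pyplot as plt
import gtsam
import gtsam.utils.plot as gtsam_plot

gtsam_plot.plot_pose3(0, gtsam.Pose3(), 10, 0.1 * np.eye(6))
plt.show()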
+ fig = plt.figure(0) + axes = fig.gca(projection='3d') + plt.cla() + + i = 1 + while current_estimate.exists(i): + gtsam_plot.plot_pose3(0, current_estimate.atPose3(i), 10, + marginals.marginalCovariance(i)) + i += 1 + + axes.set_xlim3d(-30, 45) + axes.set_ylim3d(-30, 45) + axes.set_zlim3d(-30, 45) + plt.pause(1) + +def create_poses() -> List[gtsam.Pose3]: + """Creates ground truth poses of the robot.""" + P0 = np.array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + P1 = np.array([[0, -1, 0, 15], + [1, 0, 0, 15], + [0, 0, 1, 20], + [0, 0, 0, 1]]) + P2 = np.array([[np.cos(np.pi/4), 0, np.sin(np.pi/4), 30], + [0, 1, 0, 30], + [-np.sin(np.pi/4), 0, np.cos(np.pi/4), 30], + [0, 0, 0, 1]]) + P3 = np.array([[0, 1, 0, 30], + [0, 0, -1, 0], + [-1, 0, 0, -15], + [0, 0, 0, 1]]) + P4 = np.array([[-1, 0, 0, 0], + [0, -1, 0, -10], + [0, 0, 1, -10], + [0, 0, 0, 1]]) + P5 = P0[:] + + return [gtsam.Pose3(P0), gtsam.Pose3(P1), gtsam.Pose3(P2), + gtsam.Pose3(P3), gtsam.Pose3(P4), gtsam.Pose3(P5)] + +def determine_loop_closure(odom_tf: gtsam.Pose3, current_estimate: gtsam.Values, + key: int, xyz_tol=0.6, rot_tol=17) -> int: + """Simple brute force approach which iterates through previous states + and checks for loop closure. + + Args: + odom_tf: The noisy odometry transformation measurement in the body frame. + current_estimate: The current estimates computed by iSAM2. + key: Key corresponding to the current state estimate of the robot. + xyz_tol: Optional argument for the translational tolerance, in meters. + rot_tol: Optional argument for the rotational tolerance, in degrees. + Returns: + k: The key of the state which is helping add the loop closure constraint. + If loop closure is not found, then None is returned. + """ + if current_estimate: + prev_est = current_estimate.atPose3(key+1) + curr_est = prev_est.compose(odom_tf) + for k in range(1, key+1): + pose = current_estimate.atPose3(k) + if (abs(pose.matrix()[:3,:3] - curr_est.matrix()[:3,:3]) <= rot_tol*np.pi/180).all() and \ + (abs(pose.matrix()[:3,3] - curr_est.matrix()[:3,3]) <= xyz_tol).all(): + return k + +def Pose3_ISAM2_example(): + """Perform 3D SLAM given ground truth poses as well as simple + loop closure detection.""" + plt.ion() + + # Declare the 3D translational standard deviations of the prior factor's Gaussian model, in meters. + prior_xyz_sigma = 0.3 + + # Declare the 3D rotational standard deviations of the prior factor's Gaussian model, in degrees. + prior_rpy_sigma = 5 + + # Declare the 3D translational standard deviations of the odometry factor's Gaussian model, in meters. + odometry_xyz_sigma = 0.2 + + # Declare the 3D rotational standard deviations of the odometry factor's Gaussian model, in degrees. + odometry_rpy_sigma = 5 + + # Although this example only uses linear measurements and Gaussian noise models, it is important + # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example + # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem. + PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_rpy_sigma*np.pi/180, + prior_rpy_sigma*np.pi/180, + prior_rpy_sigma*np.pi/180, + prior_xyz_sigma, + prior_xyz_sigma, + prior_xyz_sigma])) + ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_rpy_sigma*np.pi/180, + odometry_rpy_sigma*np.pi/180, + odometry_rpy_sigma*np.pi/180, + odometry_xyz_sigma, + odometry_xyz_sigma, + odometry_xyz_sigma])) + + # Create a Nonlinear factor graph as well as the data structure to hold state estimates. 
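# The 6-vector of sigmas above follows Pose3's tangent-space convention: rotation first
# (roll, pitch, yaw, in radians), then translation (x, y, z, in meters). The log map of
# a pure translation makes the ordering visible:
import gtsam

xi = gtsam.Pose3.Logmap(gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 2, 3)))
print(xi)  # [0. 0. 0. 1. 2. 3.] -- rotation block first, then translation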
+ graph = gtsam.NonlinearFactorGraph() + initial_estimate = gtsam.Values() + + # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many + # update calls are required to perform the relinearization. + parameters = gtsam.ISAM2Params() + parameters.setRelinearizeThreshold(0.1) + parameters.setRelinearizeSkip(1) + isam = gtsam.ISAM2(parameters) + + # Create the ground truth poses of the robot trajectory. + true_poses = create_poses() + + # Create the ground truth odometry transformations, xyz translations, and roll-pitch-yaw rotations + # between each robot pose in the trajectory. + odometry_tf = [true_poses[i-1].transformPoseTo(true_poses[i]) for i in range(1, len(true_poses))] + odometry_xyz = [(odometry_tf[i].x(), odometry_tf[i].y(), odometry_tf[i].z()) for i in range(len(odometry_tf))] + odometry_rpy = [odometry_tf[i].rotation().rpy() for i in range(len(odometry_tf))] + + # Corrupt xyz translations and roll-pitch-yaw rotations with gaussian noise to create noisy odometry measurements. + noisy_measurements = [np.random.multivariate_normal(np.hstack((odometry_rpy[i],odometry_xyz[i])), \ + ODOMETRY_NOISE.covariance()) for i in range(len(odometry_tf))] + + # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate + # iSAM2 incremental optimization. + graph.push_back(gtsam.PriorFactorPose3(1, true_poses[0], PRIOR_NOISE)) + initial_estimate.insert(1, true_poses[0].compose(gtsam.Pose3( + gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20)))) + + # Initialize the current estimate which is used during the incremental inference loop. + current_estimate = initial_estimate + for i in range(len(odometry_tf)): + + # Obtain the noisy translation and rotation that is received by the robot and corrupted by gaussian noise. + noisy_odometry = noisy_measurements[i] + + # Compute the noisy odometry transformation according to the xyz translation and roll-pitch-yaw rotation. + noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy_odometry[:3]), noisy_odometry[3:6].reshape(-1,1)) + + # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state. + loop = determine_loop_closure(noisy_tf, current_estimate, i, xyz_tol=18, rot_tol=30) + + # Add a binary factor in between two existing states if loop closure is detected. + # Otherwise, add a binary factor between a newly observed state and the previous state. + if loop: + graph.push_back(gtsam.BetweenFactorPose3(i + 1, loop, noisy_tf, ODOMETRY_NOISE)) + else: + graph.push_back(gtsam.BetweenFactorPose3(i + 1, i + 2, noisy_tf, ODOMETRY_NOISE)) + + # Compute and insert the initialization estimate for the current pose using a noisy odometry measurement. + noisy_estimate = current_estimate.atPose3(i + 1).compose(noisy_tf) + initial_estimate.insert(i + 2, noisy_estimate) + + # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables. + isam.update(graph, initial_estimate) + current_estimate = isam.calculateEstimate() + + # Report all current state estimates from the iSAM2 optimization. + report_on_progress(graph, current_estimate, i) + initial_estimate.clear() + + # Print the final covariance matrix for each pose after completing inference. 
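# The relative measurement given to each BetweenFactorPose3 above is the next pose
# expressed in the frame of the previous one; transformPoseTo (used for the ground-truth
# odometry) and between compute the same quantity. The poses below are arbitrary:
import gtsam

wTa = gtsam.Pose3(gtsam.Rot3.Yaw(0.3), gtsam.Point3(1, 2, 3))
wTb = gtsam.Pose3(gtsam.Rot3.Yaw(1.0), gtsam.Point3(4, 5, 6))
print(wTa.transformPoseTo(wTb).equals(wTa.between(wTb), 1e-9))  # True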
+ marginals = gtsam.Marginals(graph, current_estimate) + i = 1 + while current_estimate.exists(i): + print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n") + i += 1 + + plt.ioff() + plt.show() + +if __name__ == '__main__': + Pose3_ISAM2_example() diff --git a/python/gtsam/examples/Pose3SLAMExample_g2o.py b/python/gtsam/examples/Pose3SLAMExample_g2o.py index 501a75dc1..dcdfc34a3 100644 --- a/python/gtsam/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose3SLAMExample_g2o.py @@ -8,13 +8,14 @@ # pylint: disable=invalid-name, E1101 from __future__ import print_function + import argparse -import numpy as np -import matplotlib.pyplot as plt -from mpl_toolkits.mplot3d import Axes3D import gtsam +import matplotlib.pyplot as plt +import numpy as np from gtsam.utils import plot +from mpl_toolkits.mplot3d import Axes3D def vector6(x, y, z, a, b, c): @@ -22,50 +23,62 @@ def vector6(x, y, z, a, b, c): return np.array([x, y, z, a, b, c], dtype=float) -parser = argparse.ArgumentParser( - description="A 3D Pose SLAM example that reads input from g2o, and " - "initializes Pose3") -parser.add_argument('-i', '--input', help='input file g2o format') -parser.add_argument('-o', '--output', - help="the path to the output file with optimized graph") -parser.add_argument("-p", "--plot", action="store_true", - help="Flag to plot results") -args = parser.parse_args() +def main(): + """Main runner.""" -g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \ - else args.input + parser = argparse.ArgumentParser( + description="A 3D Pose SLAM example that reads input from g2o, and " + "initializes Pose3") + parser.add_argument('-i', '--input', help='input file g2o format') + parser.add_argument( + '-o', + '--output', + help="the path to the output file with optimized graph") + parser.add_argument("-p", + "--plot", + action="store_true", + help="Flag to plot results") + args = parser.parse_args() -is3D = True -graph, initial = gtsam.readG2o(g2oFile, is3D) + g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \ + else args.input -# Add Prior on the first key -priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, - 1e-4, 1e-4, 1e-4)) + is3D = True + graph, initial = gtsam.readG2o(g2oFile, is3D) -print("Adding prior to g2o file ") -firstKey = initial.keys()[0] -graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) + # Add Prior on the first key + priorModel = gtsam.noiseModel.Diagonal.Variances( + vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) -params = gtsam.GaussNewtonParams() -params.setVerbosity("Termination") # this will show info about stopping conds -optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) -result = optimizer.optimize() -print("Optimization complete") + print("Adding prior to g2o file ") + firstKey = initial.keys()[0] + graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) -print("initial error = ", graph.error(initial)) -print("final error = ", graph.error(result)) + params = gtsam.GaussNewtonParams() + params.setVerbosity( + "Termination") # this will show info about stopping conds + optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) + result = optimizer.optimize() + print("Optimization complete") -if args.output is None: - print("Final Result:\n{}".format(result)) -else: - outputFile = args.output - print("Writing results to file: ", outputFile) - graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) - gtsam.writeG2o(graphNoKernel, result, outputFile) - print 
("Done!") + print("initial error = ", graph.error(initial)) + print("final error = ", graph.error(result)) -if args.plot: - resultPoses = gtsam.utilities.allPose3s(result) - for i in range(resultPoses.size()): - plot.plot_pose3(1, resultPoses.atPose3(i)) - plt.show() + if args.output is None: + print("Final Result:\n{}".format(result)) + else: + outputFile = args.output + print("Writing results to file: ", outputFile) + graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D) + gtsam.writeG2o(graphNoKernel, result, outputFile) + print("Done!") + + if args.plot: + resultPoses = gtsam.utilities.allPose3s(result) + for i in range(resultPoses.size()): + plot.plot_pose3(1, resultPoses.atPose3(i)) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py index 2b2c5f991..a96da0774 100644 --- a/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -13,23 +13,29 @@ Author: Luca Carlone, Frank Dellaert (python port) from __future__ import print_function +import gtsam import numpy as np -import gtsam -# Read graph from file -g2oFile = gtsam.findExampleDataFile("pose3example.txt") +def main(): + """Main runner.""" + # Read graph from file + g2oFile = gtsam.findExampleDataFile("pose3example.txt") -is3D = True -graph, initial = gtsam.readG2o(g2oFile, is3D) + is3D = True + graph, initial = gtsam.readG2o(g2oFile, is3D) -# Add prior on the first key. TODO: assumes first key ios z -priorModel = gtsam.noiseModel.Diagonal.Variances( - np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) -firstKey = initial.keys()[0] -graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) + # Add prior on the first key. TODO: assumes first key ios z + priorModel = gtsam.noiseModel.Diagonal.Variances( + np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) + firstKey = initial.keys()[0] + graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) -# Initializing Pose3 - chordal relaxation" -initialization = gtsam.InitializePose3.initialize(graph) + # Initializing Pose3 - chordal relaxation + initialization = gtsam.InitializePose3.initialize(graph) -print(initialization) + print(initialization) + + +if __name__ == "__main__": + main() diff --git a/python/gtsam/examples/PreintegrationExample.py b/python/gtsam/examples/PreintegrationExample.py index 458248c30..611c536c7 100644 --- a/python/gtsam/examples/PreintegrationExample.py +++ b/python/gtsam/examples/PreintegrationExample.py @@ -5,10 +5,14 @@ All Rights Reserved See LICENSE for the license information -A script validating the Preintegration of IMU measurements +A script validating the Preintegration of IMU measurements. + +Authors: Frank Dellaert, Varun Agrawal. 
""" -import math +# pylint: disable=invalid-name,unused-import,wrong-import-order + +from typing import Optional, Sequence import gtsam import matplotlib.pyplot as plt @@ -18,25 +22,28 @@ from mpl_toolkits.mplot3d import Axes3D IMU_FIG = 1 POSES_FIG = 2 +GRAVITY = 10 -class PreintegrationExample(object): - +class PreintegrationExample: + """Base class for all preintegration examples.""" @staticmethod - def defaultParams(g): + def defaultParams(g: float): """Create default parameters with Z *up* and realistic noise parameters""" params = gtsam.PreintegrationParams.MakeSharedU(g) - kGyroSigma = math.radians(0.5) / 60 # 0.5 degree ARW + kGyroSigma = np.radians(0.5) / 60 # 0.5 degree ARW kAccelSigma = 0.1 / 60 # 10 cm VRW - params.setGyroscopeCovariance( - kGyroSigma ** 2 * np.identity(3, float)) - params.setAccelerometerCovariance( - kAccelSigma ** 2 * np.identity(3, float)) - params.setIntegrationCovariance( - 0.0000001 ** 2 * np.identity(3, float)) + params.setGyroscopeCovariance(kGyroSigma**2 * np.identity(3, float)) + params.setAccelerometerCovariance(kAccelSigma**2 * + np.identity(3, float)) + params.setIntegrationCovariance(0.0000001**2 * np.identity(3, float)) return params - def __init__(self, twist=None, bias=None, dt=1e-2): + def __init__(self, + twist: Optional[np.ndarray] = None, + bias: Optional[gtsam.imuBias.ConstantBias] = None, + params: Optional[gtsam.PreintegrationParams] = None, + dt: float = 1e-2): """Initialize with given twist, a pair(angularVelocityVector, velocityVector).""" # setup interactive plotting @@ -48,7 +55,7 @@ class PreintegrationExample(object): else: # default = loop with forward velocity 2m/s, while pitching up # with angular velocity 30 degree/sec (negative in FLU) - W = np.array([0, -math.radians(30), 0]) + W = np.array([0, -np.radians(30), 0]) V = np.array([2, 0, 0]) self.scenario = gtsam.ConstantTwistScenario(W, V) @@ -58,9 +65,11 @@ class PreintegrationExample(object): self.labels = list('xyz') self.colors = list('rgb') - # Create runner - self.g = 10 # simple gravity constant - self.params = self.defaultParams(self.g) + if params: + self.params = params + else: + # Default params with simple gravity constant + self.params = self.defaultParams(g=GRAVITY) if bias is not None: self.actualBias = bias @@ -69,13 +78,22 @@ class PreintegrationExample(object): gyroBias = np.array([0, 0, 0]) self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias) - self.runner = gtsam.ScenarioRunner( - self.scenario, self.params, self.dt, self.actualBias) + # Create runner + self.runner = gtsam.ScenarioRunner(self.scenario, self.params, self.dt, + self.actualBias) fig, self.axes = plt.subplots(4, 3) fig.set_tight_layout(True) - def plotImu(self, t, measuredOmega, measuredAcc): + def plotImu(self, t: float, measuredOmega: Sequence, + measuredAcc: Sequence): + """ + Plot IMU measurements. + Args: + t: The time at which the measurement was recoreded. + measuredOmega: Measured angular velocity. + measuredAcc: Measured linear acceleration. + """ plt.figure(IMU_FIG) # plot angular velocity @@ -108,12 +126,21 @@ class PreintegrationExample(object): ax.scatter(t, measuredAcc[i], color=color, marker='.') ax.set_xlabel('specific force ' + label) - def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01): - # plot ground truth pose, as well as prediction from integrated IMU measurements + def plotGroundTruthPose(self, + t: float, + scale: float = 0.3, + time_interval: float = 0.01): + """ + Plot ground truth pose, as well as prediction from integrated IMU measurements. 
+ Args: + t: Time at which the pose was obtained. + scale: The scaling factor for the pose axes. + time_interval: The time to wait before showing the plot. + """ actualPose = self.scenario.pose(t) plot_pose3(POSES_FIG, actualPose, scale) - t = actualPose.translation() - self.maxDim = max([max(np.abs(t)), self.maxDim]) + translation = actualPose.translation() + self.maxDim = max([max(np.abs(translation)), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) @@ -121,8 +148,8 @@ class PreintegrationExample(object): plt.pause(time_interval) - def run(self, T=12): - # simulate the loop + def run(self, T: int = 12): + """Simulate the loop.""" for i, t in enumerate(np.arange(0, T, self.dt)): measuredOmega = self.runner.measuredAngularVelocity(t) measuredAcc = self.runner.measuredSpecificForce(t) diff --git a/python/gtsam/examples/SFMExample_bal.py b/python/gtsam/examples/SFMExample_bal.py index dfe8b523c..f3e48c3c3 100644 --- a/python/gtsam/examples/SFMExample_bal.py +++ b/python/gtsam/examples/SFMExample_bal.py @@ -7,73 +7,79 @@ See LICENSE for the license information Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file - Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert) + Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert, Varun Agrawal) """ import argparse import logging import sys -import matplotlib.pyplot as plt -import numpy as np - import gtsam -from gtsam import ( - GeneralSFMFactorCal3Bundler, - PinholeCameraCal3Bundler, - PriorFactorPinholeCameraCal3Bundler, - readBal, - symbol_shorthand -) +from gtsam import (GeneralSFMFactorCal3Bundler, + PriorFactorPinholeCameraCal3Bundler, PriorFactorPoint3) +from gtsam.symbol_shorthand import C, P # type: ignore +from gtsam.utils import plot # type: ignore +from matplotlib import pyplot as plt -C = symbol_shorthand.C -P = symbol_shorthand.P +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + +DEFAULT_BAL_DATASET = "dubrovnik-3-7-pre" -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) +def plot_scene(scene_data: gtsam.SfmData, result: gtsam.Values) -> None: + """Plot the SFM results.""" + plot_vals = gtsam.Values() + for cam_idx in range(scene_data.number_cameras()): + plot_vals.insert(C(cam_idx), + result.atPinholeCameraCal3Bundler(C(cam_idx)).pose()) + for j in range(scene_data.number_tracks()): + plot_vals.insert(P(j), result.atPoint3(P(j))) -def run(args): + plot.plot_3d_points(0, plot_vals, linespec="g.") + plot.plot_trajectory(0, plot_vals, title="SFM results") + + plt.show() + + +def run(args: argparse.Namespace) -> None: """ Run LM optimization with BAL input data and report resulting error """ - input_file = gtsam.findExampleDataFile(args.input_file) + input_file = args.input_file # Load the SfM data from file - scene_data = readBal(input_file) - logging.info(f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n") + scene_data = gtsam.readBal(input_file) + logging.info("read %d tracks on %d cameras\n", scene_data.number_tracks(), + scene_data.number_cameras()) # Create a factor graph graph = gtsam.NonlinearFactorGraph() # We share *one* noiseModel between all projection factors - noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v + noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Add measurements to the factor graph - j = 0 - for t_idx in range(scene_data.number_tracks()): - track = scene_data.track(t_idx) # SfmTrack + for j in 
range(scene_data.number_tracks()): + track = scene_data.track(j) # SfmTrack # retrieve the SfmMeasurement objects for m_idx in range(track.number_measurements()): # i represents the camera index, and uv is the 2d measurement i, uv = track.measurement(m_idx) # note use of shorthand symbols C and P graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j))) - j += 1 # Add a prior on pose x1. This indirectly specifies where the origin is. graph.push_back( - gtsam.PriorFactorPinholeCameraCal3Bundler( - C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1) - ) - ) + PriorFactorPinholeCameraCal3Bundler( + C(0), scene_data.camera(0), + gtsam.noiseModel.Isotropic.Sigma(9, 0.1))) # Also add a prior on the position of the first landmark to fix the scale graph.push_back( - gtsam.PriorFactorPoint3( - P(0), scene_data.track(0).point3(), gtsam.noiseModel.Isotropic.Sigma(3, 0.1) - ) - ) + PriorFactorPoint3(P(0), + scene_data.track(0).point3(), + gtsam.noiseModel.Isotropic.Sigma(3, 0.1))) # Create initial estimate initial = gtsam.Values() - + i = 0 # add each PinholeCameraCal3Bundler for cam_idx in range(scene_data.number_cameras()): @@ -81,12 +87,10 @@ def run(args): initial.insert(C(i), camera) i += 1 - j = 0 # add each SfmTrack - for t_idx in range(scene_data.number_tracks()): - track = scene_data.track(t_idx) + for j in range(scene_data.number_tracks()): + track = scene_data.track(j) initial.insert(P(j), track.point3()) - j += 1 # Optimize the graph and print results try: @@ -94,25 +98,31 @@ def run(args): params.setVerbosityLM("ERROR") lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) result = lm.optimize() - except Exception as e: + except RuntimeError: logging.exception("LM Optimization failed") return + # Error drops from ~2764.22 to ~0.046 - logging.info(f"final error: {graph.error(result)}") + logging.info("initial error: %f", graph.error(initial)) + logging.info("final error: %f", graph.error(result)) + + plot_scene(scene_data, result) + + +def main() -> None: + """Main runner.""" + parser = argparse.ArgumentParser() + parser.add_argument('-i', + '--input_file', + type=str, + default=gtsam.findExampleDataFile(DEFAULT_BAL_DATASET), + help="""Read SFM data from the specified BAL file. + The data format is described here: https://grail.cs.washington.edu/projects/bal/. + BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, + then (wx,wy,wz,tx,ty,tz,f,k1,k1) as Bundler camera calibrations w/ Rodrigues vector + and (x,y,z) 3d point initializations.""") + run(parser.parse_args()) if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '-i', - '--input_file', - type=str, - default="dubrovnik-3-7-pre", - help='Read SFM data from the specified BAL file' - 'The data format is described here: https://grail.cs.washington.edu/projects/bal/.' - 'BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, ' - 'then (wx,wy,wz,tx,ty,tz,f,k1,k1) as Bundler camera calibrations w/ Rodrigues vector' - 'and (x,y,z) 3d point initializations.' 
- ) - run(parser.parse_args()) - + main() diff --git a/python/gtsam/tests/test_Cal3Fisheye.py b/python/gtsam/tests/test_Cal3Fisheye.py index 298c6e57b..e54afc757 100644 --- a/python/gtsam/tests/test_Cal3Fisheye.py +++ b/python/gtsam/tests/test_Cal3Fisheye.py @@ -17,6 +17,15 @@ import gtsam from gtsam.utils.test_case import GtsamTestCase from gtsam.symbol_shorthand import K, L, P + +def ulp(ftype=np.float64): + """ + Unit in the last place of floating point datatypes + """ + f = np.finfo(ftype) + return f.tiny / ftype(1 << f.nmant) + + class TestCal3Fisheye(GtsamTestCase): @classmethod @@ -105,6 +114,71 @@ class TestCal3Fisheye(GtsamTestCase): score = graph.error(state) self.assertAlmostEqual(score, 0) + def test_jacobian_on_axis(self): + """Check of jacobian at optical axis""" + obj_point_on_axis = np.array([0, 0, 1]) + img_point = np.array([0, 0]) + f, z, H = self.evaluate_jacobian(obj_point_on_axis, img_point) + self.assertAlmostEqual(f, 0) + self.gtsamAssertEquals(z, np.zeros(2)) + self.gtsamAssertEquals(H @ H.T, 3*np.eye(2)) + + def test_jacobian_convergence(self): + """Test stability of jacobian close to optical axis""" + t = ulp(np.float64) + obj_point_close_to_axis = np.array([t, 0, 1]) + img_point = np.array([np.sqrt(t), 0]) + f, z, H = self.evaluate_jacobian(obj_point_close_to_axis, img_point) + self.assertAlmostEqual(f, 0) + self.gtsamAssertEquals(z, np.zeros(2)) + self.gtsamAssertEquals(H @ H.T, 3*np.eye(2)) + + # With a height of sqrt(ulp), this may cause an overflow + t = ulp(np.float64) + obj_point_close_to_axis = np.array([np.sqrt(t), 0, 1]) + img_point = np.array([np.sqrt(t), 0]) + f, z, H = self.evaluate_jacobian(obj_point_close_to_axis, img_point) + self.assertAlmostEqual(f, 0) + self.gtsamAssertEquals(z, np.zeros(2)) + self.gtsamAssertEquals(H @ H.T, 3*np.eye(2)) + + def test_scaling_factor(self): + """Check convergence of atan2(r, z)/r ~ 1/z for small r""" + r = ulp(np.float64) + s = np.arctan(r) / r + self.assertEqual(s, 1.0) + z = 1 + s = self.scaling_factor(r, z) + self.assertEqual(s, 1.0/z) + z = 2 + s = self.scaling_factor(r, z) + self.assertEqual(s, 1.0/z) + s = self.scaling_factor(2*r, z) + self.assertEqual(s, 1.0/z) + + @staticmethod + def scaling_factor(r, z): + """Projection factor theta/r for equidistant fisheye lens model""" + return np.arctan2(r, z) / r if r/z != 0 else 1.0/z + + @staticmethod + def evaluate_jacobian(obj_point, img_point): + """Evaluate jacobian at given object point""" + pose = gtsam.Pose3() + camera = gtsam.Cal3Fisheye() + state = gtsam.Values() + camera_key, pose_key, landmark_key = K(0), P(0), L(0) + state.insert_point3(landmark_key, obj_point) + state.insert_pose3(pose_key, pose) + g = gtsam.NonlinearFactorGraph() + noise_model = gtsam.noiseModel.Unit.Create(2) + factor = gtsam.GenericProjectionFactorCal3Fisheye(img_point, noise_model, pose_key, landmark_key, camera) + g.add(factor) + f = g.error(state) + gaussian_factor_graph = g.linearize(state) + H, z = gaussian_factor_graph.jacobian() + return f, z, H + @unittest.skip("triangulatePoint3 currently seems to require perspective projections.") def test_triangulation_skipped(self): """Estimate spatial point from image measurements""" diff --git a/python/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py index dab1ae446..bafbacfa4 100644 --- a/python/gtsam/tests/test_Cal3Unified.py +++ b/python/gtsam/tests/test_Cal3Unified.py @@ -117,6 +117,28 @@ class TestCal3Unified(GtsamTestCase): score = graph.error(state) self.assertAlmostEqual(score, 0) + def 
test_jacobian(self): + """Evaluate jacobian at optical axis""" + obj_point_on_axis = np.array([0, 0, 1]) + img_point = np.array([0.0, 0.0]) + pose = gtsam.Pose3() + camera = gtsam.Cal3Unified() + state = gtsam.Values() + camera_key, pose_key, landmark_key = K(0), P(0), L(0) + state.insert_cal3unified(camera_key, camera) + state.insert_point3(landmark_key, obj_point_on_axis) + state.insert_pose3(pose_key, pose) + g = gtsam.NonlinearFactorGraph() + noise_model = gtsam.noiseModel.Unit.Create(2) + factor = gtsam.GeneralSFMFactor2Cal3Unified(img_point, noise_model, pose_key, landmark_key, camera_key) + g.add(factor) + f = g.error(state) + gaussian_factor_graph = g.linearize(state) + H, z = gaussian_factor_graph.jacobian() + self.assertAlmostEqual(f, 0) + self.gtsamAssertEquals(z, np.zeros(2)) + self.gtsamAssertEquals(H @ H.T, 4*np.eye(2)) + @unittest.skip("triangulatePoint3 currently seems to require perspective projections.") def test_triangulation(self): """Estimate spatial point from image measurements""" diff --git a/python/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py index e07b904a9..411828890 100644 --- a/python/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -59,8 +59,16 @@ class TestPose3(GtsamTestCase): self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2)) def test_adjoint(self): - """Test adjoint method.""" + """Test adjoint methods.""" + T = Pose3() xi = np.array([1, 2, 3, 4, 5, 6]) + # test calling functions + T.AdjointMap() + T.Adjoint(xi) + T.AdjointTranspose(xi) + Pose3.adjointMap(xi) + Pose3.adjoint(xi, xi) + # test correctness of adjoint(x, y) expected = np.dot(Pose3.adjointMap_(xi), xi) actual = Pose3.adjoint_(xi, xi) np.testing.assert_array_equal(actual, expected) diff --git a/python/gtsam/tests/test_Rot3.py b/python/gtsam/tests/test_Rot3.py new file mode 100644 index 000000000..a1ce01ba2 --- /dev/null +++ b/python/gtsam/tests/test_Rot3.py @@ -0,0 +1,2037 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +See LICENSE for the license information +Rot3 unit tests. 
+Author: John Lambert +""" +# pylint: disable=no-name-in-module + +import unittest + +import numpy as np + +import gtsam +from gtsam import Rot3 +from gtsam.utils.test_case import GtsamTestCase + + +R1_R2_pairs = [ + ( + [ + [0.994283, -0.10356, 0.0260251], + [0.102811, 0.994289, 0.0286205], + [-0.0288404, -0.0257812, 0.999251], + ], + [ + [-0.994235, 0.0918291, -0.0553602], + [-0.0987317, -0.582632, 0.806718], + [0.0418251, 0.807532, 0.588339], + ], + ), + ( + [ + [0.999823, -0.000724729, 0.0187896], + [0.00220672, 0.996874, -0.0789728], + [-0.0186736, 0.0790003, 0.9967], + ], + [ + [-0.99946, -0.0155217, -0.0289749], + [-0.0306159, 0.760422, 0.648707], + [0.0119641, 0.649244, -0.760487], + ], + ), + ( + [ + [0.999976, 0.00455542, -0.00529608], + [-0.00579633, 0.964214, -0.265062], + [0.00389908, 0.265086, 0.964217], + ], + [ + [-0.999912, -0.0123323, -0.00489179], + [-0.00739095, 0.21159, 0.977331], + [-0.0110179, 0.977281, -0.211663], + ], + ), + ( + [ + [0.998801, 0.0449026, 0.019479], + [-0.0448727, 0.998991, -0.00197348], + [-0.0195479, 0.00109704, 0.999808], + ], + [ + [-0.999144, -0.0406154, -0.00800012], + [0.0406017, -0.999174, 0.00185875], + [-0.00806909, 0.00153352, 0.999966], + ], + ), + ( + [ + [0.587202, 0.00034062, -0.80944], + [0.394859, 0.872825, 0.286815], + [0.706597, -0.488034, 0.51239], + ], + [ + [-0.999565, -0.028095, -0.00905389], + [0.0192863, -0.853838, 0.520182], + [-0.0223455, 0.519782, 0.854007], + ], + ), + ( + [ + [0.998798, 0.0370584, 0.0320815], + [-0.0355966, 0.998353, -0.0449943], + [-0.033696, 0.0437982, 0.998472], + ], + [ + [-0.999942, -0.010745, -0.00132538], + [-0.000998705, -0.0304045, 0.999538], + [-0.0107807, 0.999481, 0.0303914], + ], + ), + ( + [ + [0.998755, 0.00708291, -0.0493744], + [-0.00742097, 0.99995, -0.00666709], + [0.0493247, 0.0070252, 0.998758], + ], + [ + [-0.998434, 0.0104672, 0.0549825], + [0.0115323, 0.999751, 0.0190859], + [-0.0547691, 0.01969, -0.998307], + ], + ), + ( + [ + [0.990471, 0.0997485, -0.0949595], + [-0.117924, 0.970427, -0.210631], + [0.0711411, 0.219822, 0.972943], + ], + [ + [-0.99192, -0.125627, 0.0177888], + [0.126478, -0.967866, 0.217348], + [-0.0100874, 0.217839, 0.975933], + ], + ), + ( + [ + [-0.780894, -0.578319, -0.236116], + [0.34478, -0.0838381, -0.934932], + [0.520894, -0.811491, 0.264862], + ], + [ + [-0.99345, 0.00261746, -0.114244], + [-0.112503, 0.152922, 0.981815], + [0.0200403, 0.988236, -0.151626], + ], + ), + ( + [ + [0.968425, 0.0466097, 0.244911], + [-0.218867, 0.629346, 0.745668], + [-0.119378, -0.775726, 0.619676], + ], + [ + [-0.971208, 0.00666431, -0.238143], + [0.0937886, 0.929584, -0.35648], + [0.218998, -0.368551, -0.903444], + ], + ), + ( + [ + [0.998512, 0.0449168, -0.0309146], + [-0.0344032, 0.958823, 0.281914], + [0.0423043, -0.280431, 0.958941], + ], + [ + [-0.999713, 0.00732431, 0.0228168], + [-0.00759688, 0.806166, -0.59164], + [-0.0227275, -0.591644, -0.805879], + ], + ), + ( + [ + [0.981814, 0.00930728, 0.189617], + [-0.0084101, 0.999949, -0.00553563], + [-0.189659, 0.00384026, 0.981843], + ], + [ + [-0.981359, 0.00722349, -0.192051], + [0.00148564, 0.999549, 0.0300036], + [0.192182, 0.0291591, -0.980927], + ], + ), + ( + [ + [0.972544, -0.215591, 0.0876242], + [0.220661, 0.973915, -0.0529018], + [-0.0739333, 0.0707846, 0.994748], + ], + [ + [-0.971294, 0.215675, -0.100371], + [-0.23035, -0.747337, 0.62324], + [0.0594069, 0.628469, 0.775564], + ], + ), + ( + [ + [0.989488, 0.0152447, 0.143808], + [-0.0160974, 0.999859, 0.00476753], + [-0.143715, -0.00703235, 0.989594], + ], + 
[ + [-0.988492, 0.0124362, -0.150766], + [0.00992423, 0.999799, 0.0174037], + [0.150952, 0.0157072, -0.988417], + ], + ), + ( + [ + [0.99026, 0.109934, -0.0854388], + [-0.0973012, 0.985345, 0.140096], + [0.099588, -0.130418, 0.986445], + ], + [ + [-0.994239, 0.0206112, 0.1052], + [0.0227944, 0.999548, 0.0195944], + [-0.104748, 0.0218794, -0.994259], + ], + ), + ( + [ + [0.988981, 0.132742, -0.0655406], + [-0.113134, 0.963226, 0.243712], + [0.0954813, -0.233612, 0.96763], + ], + [ + [-0.989473, -0.144453, 0.00888153], + [0.112318, -0.727754, 0.67658], + [-0.0912697, 0.670455, 0.736317], + ], + ), + ( + [ + [0.13315, -0.722685, 0.678231], + [0.255831, 0.686195, 0.680946], + [-0.957508, 0.0828446, 0.276252], + ], + [ + [-0.233019, 0.0127274, -0.97239], + [-0.0143824, 0.99976, 0.0165321], + [0.972367, 0.0178377, -0.23278], + ], + ), + ( + [ + [0.906305, -0.0179617, -0.422243], + [0.0246095, 0.999644, 0.0102984], + [0.421908, -0.0197247, 0.906424], + ], + [ + [-0.90393, 0.0136293, 0.427466], + [0.0169755, 0.999848, 0.0040176], + [-0.427346, 0.0108879, -0.904024], + ], + ), + ( + [ + [0.999808, 0.0177784, -0.00826505], + [-0.0177075, 0.999806, 0.00856939], + [0.0084158, -0.00842139, 0.999929], + ], + [ + [-0.999901, -0.0141114, 0.00072392], + [0.00130602, -0.0413336, 0.999145], + [-0.0140699, 0.999047, 0.0413473], + ], + ), + ( + [ + [0.985811, -0.161425, 0.0460375], + [0.154776, 0.980269, 0.12295], + [-0.0649764, -0.11408, 0.991344], + ], + [ + [-0.985689, 0.137931, -0.09692], + [-0.162627, -0.626622, 0.762168], + [0.0443951, 0.767022, 0.640085], + ], + ), + ( + [ + [0.956652, -0.0116044, 0.291001], + [0.05108, 0.990402, -0.128428], + [-0.286718, 0.137726, 0.948064], + ], + [ + [-0.956189, 0.00996594, -0.292585], + [-0.0397033, 0.985772, 0.16333], + [0.29005, 0.167791, -0.942189], + ], + ), + ( + [ + [0.783763, -0.0181248, -0.620796], + [-0.0386421, 0.996214, -0.0778717], + [0.619857, 0.0850218, 0.780095], + ], + [ + [-0.780275, 0.0093644, 0.625368], + [-0.0221791, 0.998845, -0.0426297], + [-0.625045, -0.0471329, -0.779165], + ], + ), + ( + [ + [0.890984, 0.0232464, -0.453439], + [-0.0221215, 0.999725, 0.00778511], + [0.453495, 0.00309433, 0.891253], + ], + [ + [-0.890178, 0.0290103, 0.45469], + [0.0337152, 0.999429, 0.0022403], + [-0.454366, 0.0173244, -0.890648], + ], + ), + ( + [ + [0.998177, -0.0119546, 0.0591504], + [0.00277494, 0.988238, 0.152901], + [-0.0602825, -0.152458, 0.98647], + ], + [ + [-0.997444, 0.00871865, -0.0709414], + [0.0197108, 0.987598, -0.155762], + [0.0687035, -0.156762, -0.985246], + ], + ), + ( + [ + [0.985214, 0.164929, 0.0463837], + [-0.166966, 0.984975, 0.0441225], + [-0.0384096, -0.0512146, 0.997949], + ], + [ + [-0.999472, -0.000819214, -0.0325087], + [-0.00344291, 0.99673, 0.0807324], + [0.0323362, 0.0808019, -0.996206], + ], + ), + ( + [ + [0.998499, 0.0465241, 0.0288955], + [-0.0454764, 0.99832, -0.0359142], + [-0.0305178, 0.0345463, 0.998937], + ], + [ + [-0.999441, 0.00412484, -0.0332105], + [0.00374685, 0.999928, 0.0114307], + [0.0332552, 0.0112999, -0.999384], + ], + ), + ( + [ + [0.10101, -0.943239, -0.316381], + [0.841334, -0.0887423, 0.533182], + [-0.530994, -0.320039, 0.784615], + ], + [ + [-0.725665, 0.0153749, -0.687878], + [-0.304813, 0.889109, 0.34143], + [0.616848, 0.457438, -0.640509], + ], + ), + ( + [ + [0.843428, 0.174952, 0.507958], + [0.0435637, 0.920106, -0.389239], + [-0.535473, 0.350423, 0.768422], + ], + [ + [-0.835464, 0.0040872, -0.549533], + [0.00227251, 0.999989, 0.00398241], + [0.549543, 0.00207845, -0.835464], + ], + ), + ( + [ + 
[0.999897, -0.0142888, -0.00160177], + [0.0141561, 0.997826, -0.064364], + [0.00251798, 0.0643346, 0.997925], + ], + [ + [-0.999956, 0.00898988, 0.00296485], + [0.00900757, 0.999941, 0.00601779], + [-0.00291058, 0.00604429, -0.999978], + ], + ), + ( + [ + [0.999477, -0.0204859, 0.0250096], + [0.0126204, 0.959462, 0.281557], + [-0.0297637, -0.281094, 0.959219], + ], + [ + [-0.999384, 0.0172602, -0.0305795], + [-0.0254425, 0.24428, 0.969371], + [0.0242012, 0.969551, -0.24369], + ], + ), + ( + [ + [0.984597, 0.173474, -0.0218106], + [-0.15145, 0.783891, -0.602145], + [-0.0873592, 0.596173, 0.798089], + ], + [ + [-0.998608, -0.0432858, 0.0301827], + [-0.00287128, 0.615692, 0.787983], + [-0.0526917, 0.786797, -0.61496], + ], + ), + ( + [ + [0.917099, -0.3072, 0.254083], + [0.303902, 0.951219, 0.0531566], + [-0.258018, 0.0284665, 0.965721], + ], + [ + [-0.931191, 0.347008, -0.111675], + [-0.352102, -0.77686, 0.522032], + [0.0943935, 0.52543, 0.845586], + ], + ), + ( + [ + [0.991706, 0.0721037, -0.106393], + [-0.0995017, 0.954693, -0.280464], + [0.0813505, 0.288725, 0.95395], + ], + [ + [-0.995306, 0.00106317, 0.0967833], + [0.0167662, 0.986717, 0.161583], + [-0.0953259, 0.162447, -0.982103], + ], + ), + ( + [ + [0.997078, 0.0478273, -0.0595641], + [-0.0348316, 0.978617, 0.202719], + [0.067986, -0.200052, 0.977424], + ], + [ + [-0.997925, -0.0439691, 0.0470461], + [0.0643829, -0.695474, 0.715663], + [0.00125305, 0.717207, 0.696861], + ], + ), + ( + [ + [0.972749, -0.0233882, -0.230677], + [0.0255773, 0.999652, 0.00650349], + [0.230445, -0.0122264, 0.973009], + ], + [ + [-0.973286, 0.0147558, 0.229126], + [0.0145644, 0.999891, -0.00252631], + [-0.229138, 0.000878362, -0.973394], + ], + ), + ( + [ + [0.999271, 0.00700481, 0.0375381], + [-0.0348202, 0.57069, 0.820427], + [-0.0156757, -0.821136, 0.570517], + ], + [ + [-0.999805, -0.0198049, 0.000539906], + [0.0179848, -0.89584, 0.444015], + [-0.00831113, 0.443938, 0.89602], + ], + ), + ( + [ + [0.975255, -0.0207895, 0.220104], + [0.0252764, 0.999526, -0.0175888], + [-0.219634, 0.022717, 0.975318], + ], + [ + [-0.975573, 0.0128154, -0.219304], + [0.0106554, 0.999882, 0.0110292], + [0.219419, 0.00842303, -0.975594], + ], + ), + ( + [ + [-0.433961, -0.658151, -0.615236], + [0.610442, -0.717039, 0.336476], + [-0.6626, -0.229548, 0.71293], + ], + [ + [-0.998516, -0.00675119, -0.054067], + [-0.00405539, 0.99875, -0.0498174], + [0.0543358, -0.0495237, -0.997296], + ], + ), + ( + [ + [0.942764, -0.0126807, -0.333221], + [-0.0017175, 0.999079, -0.042879], + [0.333458, 0.0409971, 0.941873], + ], + [ + [-0.942228, -0.0109444, 0.334798], + [0.0110573, 0.997905, 0.0637396], + [-0.334794, 0.0637589, -0.940133], + ], + ), + ( + [ + [0.962038, 0.0147987, 0.272515], + [-0.0185974, 0.999762, 0.0113615], + [-0.272283, -0.0159982, 0.962084], + ], + [ + [-0.959802, 0.0113708, -0.280451], + [0.00982126, 0.999928, 0.00692958], + [0.280509, 0.00389678, -0.959845], + ], + ), + ( + [ + [0.998414, 0.0139348, 0.0545528], + [-0.0226877, 0.986318, 0.163283], + [-0.0515311, -0.164262, 0.98507], + ], + [ + [-0.998641, -0.000695993, -0.0521343], + [0.0182534, 0.931965, -0.362087], + [0.0488394, -0.362547, -0.930686], + ], + ), + ( + [ + [0.999705, -0.0234518, -0.00633743], + [0.0235916, 0.999458, 0.0229643], + [0.00579544, -0.023107, 0.999716], + ], + [ + [-0.999901, 0.004436, 0.0133471], + [-0.00306106, 0.85758, -0.514342], + [-0.0137278, -0.514332, -0.857481], + ], + ), + ( + [ + [0.998617, -0.051704, 0.0094837], + [0.0484292, 0.975079, 0.216506], + [-0.0204416, -0.215748, 
0.976235], + ], + [ + [-0.999959, -0.00295958, -0.00862907], + [-0.00313279, 0.999792, 0.0201361], + [0.00856768, 0.0201625, -0.999761], + ], + ), + ( + [ + [0.999121, 0.0345472, -0.023733], + [-0.0333175, 0.998174, 0.0503881], + [0.0254304, -0.0495531, 0.998448], + ], + [ + [-0.999272, -0.0337466, 0.0178065], + [0.0200629, -0.0677624, 0.9975], + [-0.0324556, 0.997131, 0.0683898], + ], + ), + ( + [ + [0.989017, 0.139841, -0.0478525], + [-0.131355, 0.683201, -0.718319], + [-0.0677572, 0.716715, 0.694067], + ], + [ + [-0.995236, 0.00457798, 0.097401], + [0.097488, 0.0258334, 0.994902], + [0.00203912, 0.999657, -0.0261574], + ], + ), + ( + [ + [0.961528, 0.249402, 0.11516], + [-0.204522, 0.9298, -0.306009], + [-0.183395, 0.270684, 0.945038], + ], + [ + [-0.999566, -0.0233216, 0.0180679], + [0.012372, 0.224583, 0.974377], + [-0.0267822, 0.974177, -0.224197], + ], + ), + ( + [ + [0.865581, 0.0252563, -0.500131], + [0.0302583, 0.994265, 0.102578], + [0.499853, -0.103923, 0.859853], + ], + [ + [-0.866693, 0.0042288, 0.498824], + [0.0129331, 0.999818, 0.0139949], + [-0.498674, 0.0185807, -0.866591], + ], + ), + ( + [ + [0.998999, -0.0213419, -0.0393009], + [-0.0007582, 0.870578, -0.492031], + [0.0447153, 0.491568, 0.86969], + ], + [ + [-0.999207, -0.0184688, 0.0353073], + [0.00153266, 0.867625, 0.497218], + [-0.0398164, 0.496876, -0.866908], + ], + ), + ( + [ + [0.96567, -0.00482973, 0.259728], + [0.00349956, 0.999978, 0.00558359], + [-0.259749, -0.00448297, 0.965666], + ], + [ + [-0.962691, -0.00113074, -0.270609], + [-5.93716e-05, 0.999992, -0.00396767], + [0.270612, -0.00380337, -0.962683], + ], + ), + ( + [ + [0.948799, 0.287027, -0.131894], + [-0.300257, 0.949181, -0.0943405], + [0.0981135, 0.129112, 0.986764], + ], + [ + [-0.993593, -0.0406684, 0.105449], + [-0.0506857, 0.994269, -0.0941326], + [-0.101017, -0.0988741, -0.98996], + ], + ), + ( + [ + [0.998935, 0.0451118, 0.0097202], + [-0.0418086, 0.973879, -0.223183], + [-0.0195345, 0.222539, 0.974728], + ], + [ + [-0.999821, 0.00821522, -0.0170658], + [0.00742187, 0.998912, 0.046048], + [0.0174255, 0.0459131, -0.998794], + ], + ), + ( + [ + [0.99577, 0.00458459, 0.0917705], + [-0.00244288, 0.999722, -0.0234365], + [-0.0918524, 0.0231131, 0.995504], + ], + [ + [-0.995956, 0.0137721, -0.0887945], + [0.0122932, 0.999777, 0.0171801], + [0.0890113, 0.0160191, -0.995903], + ], + ), + ( + [ + [0.97931, 0.0219899, 0.201169], + [-0.0159226, 0.99937, -0.0317288], + [-0.201739, 0.0278692, 0.979043], + ], + [ + [-0.980952, 0.00507266, -0.19419], + [0.00310821, 0.999941, 0.010419], + [0.194231, 0.00961706, -0.98091], + ], + ), + ( + [ + [0.999616, 0.00550326, -0.0271537], + [-0.0048286, 0.99968, 0.0248495], + [0.0272817, -0.0247088, 0.999322], + ], + [ + [-0.999689, -0.00054899, 0.0249588], + [-0.00125497, 0.999599, -0.0282774], + [-0.0249333, -0.0282998, -0.999289], + ], + ), + ( + [ + [0.998036, -0.00755259, -0.0621791], + [0.0417502, 0.820234, 0.570502], + [0.0466927, -0.571978, 0.818939], + ], + [ + [-0.999135, -0.0278203, 0.0309173], + [-0.00855238, 0.864892, 0.501886], + [-0.0407029, 0.501187, -0.864382], + ], + ), + ( + [ + [0.958227, 0.00271545, 0.285997], + [-0.00426128, 0.999979, 0.00478282], + [-0.285979, -0.00580174, 0.958218], + ], + [ + [-0.958726, 0.011053, -0.284121], + [0.0138068, 0.999875, -0.00769161], + [0.284001, -0.0112968, -0.958759], + ], + ), + ( + [ + [-0.804547, -0.48558, -0.341929], + [0.517913, -0.855425, -0.00382581], + [-0.290637, -0.180168, 0.939718], + ], + [ + [0.993776, -0.0469383, -0.101033], + [-0.110087, -0.274676, 
-0.955214], + [0.0170842, 0.96039, -0.278134], + ], + ), + ( + [ + [0.991875, -0.0022313, -0.127195], + [-0.00198041, 0.999454, -0.0329762], + [0.127199, 0.0329602, 0.991329], + ], + [ + [-0.992632, -0.0090772, 0.120844], + [-0.00870494, 0.999956, 0.00360636], + [-0.120871, 0.00252786, -0.992667], + ], + ), + ( + [ + [0.999305, -0.0252534, 0.0274367], + [0.026144, 0.999126, -0.0326002], + [-0.0265895, 0.0332948, 0.999092], + ], + [ + [-0.999314, -0.0038532, -0.0368519], + [-0.00441323, 0.999876, 0.0151263], + [0.036789, 0.0152787, -0.999207], + ], + ), + ( + [ + [0.999843, -0.00958823, 0.0148803], + [0.00982469, 0.999825, -0.0159002], + [-0.0147253, 0.0160439, 0.999763], + ], + [ + [-0.999973, 0.00673608, -0.00308692], + [-0.0067409, -0.999977, 0.00116827], + [-0.00307934, 0.00119013, 0.999995], + ], + ), + ( + [ + [0.981558, -0.00727741, 0.191028], + [-0.00866166, 0.996556, 0.0824708], + [-0.190971, -0.0826044, 0.978114], + ], + [ + [-0.980202, 0.0179519, -0.197188], + [0.00957606, 0.999014, 0.0433472], + [0.197772, 0.0406008, -0.979408], + ], + ), + ( + [ + [0.966044, 0.0143709, 0.257977], + [-0.0157938, 0.999869, 0.00344404], + [-0.257894, -0.00740153, 0.966145], + ], + [ + [-0.965532, 0.0100318, -0.260094], + [0.00950897, 0.999949, 0.00326797], + [0.260113, 0.000682242, -0.965579], + ], + ), + ( + [ + [0.999965, 0.00727991, -0.00412134], + [-0.00802642, 0.973769, -0.227397], + [0.00235781, 0.227422, 0.973794], + ], + [ + [-0.999877, 0.00698241, 0.0141441], + [0.0103867, 0.966295, 0.257228], + [-0.0118713, 0.257343, -0.966248], + ], + ), + ( + [ + [0.951385, -0.0297966, 0.306561], + [-0.0314555, 0.980706, 0.19294], + [-0.306395, -0.193204, 0.932092], + ], + [ + [-0.99981, 0.00389172, -0.0191159], + [-0.00386326, -0.999991, -0.00150593], + [-0.0191215, -0.00143146, 0.999816], + ], + ), + ( + [ + [0.986772, -0.120673, 0.10825], + [0.0543962, 0.875511, 0.480126], + [-0.152713, -0.467887, 0.870495], + ], + [ + [-0.991246, 0.125848, -0.0399414], + [-0.129021, -0.85897, 0.495507], + [0.0280503, 0.496321, 0.867686], + ], + ), + ( + [ + [-0.804799, -0.588418, 0.0778637], + [-0.514399, 0.756902, 0.403104], + [-0.296129, 0.284365, -0.911836], + ], + [ + [0.98676, -0.0939473, 0.132227], + [0.162179, 0.557277, -0.814336], + [0.0028177, 0.824995, 0.565135], + ], + ), + ( + [ + [0.878935, 0.115231, 0.462813], + [0.0845639, 0.917349, -0.388998], + [-0.469386, 0.381041, 0.796546], + ], + [ + [-0.869533, 0.00193279, -0.493873], + [-0.00419575, 0.999927, 0.0113007], + [0.493859, 0.0118986, -0.869462], + ], + ), + ( + [ + [0.951881, 0.20828, 0.224816], + [-0.305582, 0.700797, 0.644595], + [-0.023294, -0.682277, 0.730722], + ], + [ + [-0.999787, 0.0141074, -0.0151097], + [-0.000971554, 0.698061, 0.716038], + [0.0206489, 0.7159, -0.697898], + ], + ), + ( + [ + [0.999538, 0.0192173, 0.0235334], + [-0.0189064, 0.999732, -0.0133635], + [-0.0237839, 0.0129124, 0.999634], + ], + [ + [-0.999807, 0.00286378, -0.0194776], + [0.0026258, 0.999922, 0.0122308], + [0.0195111, 0.0121774, -0.999736], + ], + ), + ( + [ + [0.998468, 0.041362, -0.0367422], + [-0.0364453, 0.991404, 0.125658], + [0.0416238, -0.124127, 0.991393], + ], + [ + [-0.997665, -0.0658235, 0.0183602], + [0.0216855, -0.0501652, 0.998507], + [-0.064804, 0.99657, 0.0514739], + ], + ), + ( + [ + [0.995563, 0.0493669, 0.0801057], + [-0.0272233, 0.966027, -0.257002], + [-0.0900717, 0.253681, 0.963085], + ], + [ + [-0.999228, -0.034399, -0.0190572], + [0.0250208, -0.929986, 0.366743], + [-0.0303386, 0.365984, 0.930127], + ], + ), + ( + [ + [0.952898, 
0.0122933, 0.303043], + [-0.00568444, 0.999727, -0.0226807], + [-0.303239, 0.0198898, 0.952707], + ], + [ + [-0.951155, 0.0127759, -0.308452], + [0.000612627, 0.999219, 0.0394978], + [0.308716, 0.0373795, -0.95042], + ], + ), + ( + [ + [0.923096, -0.000313887, 0.38457], + [0.00948258, 0.999714, -0.0219453], + [-0.384454, 0.0239044, 0.922835], + ], + [ + [-0.922662, -0.00403523, -0.385589], + [-0.0119834, 0.999762, 0.0182116], + [0.385424, 0.0214239, -0.922491], + ], + ), + ( + [ + [0.991575, 0.0945042, -0.0885834], + [-0.10112, 0.99216, -0.0734349], + [0.080949, 0.0817738, 0.993358], + ], + [ + [-0.990948, -0.127974, 0.0405639], + [0.096351, -0.467557, 0.878697], + [-0.0934839, 0.874651, 0.475655], + ], + ), + ( + [ + [0.997148, 0.010521, 0.0747407], + [-0.0079726, 0.999379, -0.034313], + [-0.0750553, 0.0336192, 0.996612], + ], + [ + [-0.996543, 0.00988805, -0.0825019], + [0.00939476, 0.999936, 0.0063645], + [0.0825595, 0.00556751, -0.996572], + ], + ), + ( + [ + [0.991261, 0.00474444, -0.131831], + [-0.00205841, 0.999788, 0.0205036], + [0.131901, -0.020053, 0.99106], + ], + [ + [-0.990924, 4.45275e-05, 0.134427], + [0.00614714, 0.998969, 0.0449827], + [-0.134286, 0.0454008, -0.989903], + ], + ), + ( + [ + [0.992266, -0.0947916, 0.0801474], + [0.100889, 0.992006, -0.0757987], + [-0.0723216, 0.0832984, 0.993897], + ], + [ + [-0.992701, 0.0817686, -0.0886652], + [-0.114283, -0.40263, 0.908203], + [0.0385633, 0.911704, 0.409035], + ], + ), + ( + [ + [0.99696, -0.00808565, -0.0774951], + [0.0585083, 0.734519, 0.676061], + [0.0514552, -0.67854, 0.732759], + ], + [ + [-0.9998, 0.0053398, -0.0193164], + [-0.0162677, -0.779206, 0.626556], + [-0.0117055, 0.626745, 0.779137], + ], + ), + ( + [ + [0.961501, 0.0133645, -0.274475], + [-0.016255, 0.999834, -0.00825889], + [0.274319, 0.0124025, 0.961559], + ], + [ + [-0.963687, 0.000179203, 0.267042], + [0.00670194, 0.999701, 0.023515], + [-0.266958, 0.0244509, -0.9634], + ], + ), + ( + [ + [0.99877, 0.0413462, -0.0273572], + [-0.0263673, 0.91029, 0.413131], + [0.0419844, -0.411902, 0.910261], + ], + [ + [-0.998035, -0.0613039, 0.0130407], + [-0.00146496, 0.230815, 0.972998], + [-0.0626594, 0.971065, -0.230452], + ], + ), + ( + [ + [0.999657, 0.0261608, 0.00141675], + [-0.0261957, 0.998937, 0.0379393], + [-0.000422719, -0.0379634, 0.999279], + ], + [ + [-0.998896, -0.0310033, -0.0353275], + [0.0315452, -0.999392, -0.0148857], + [-0.0348445, -0.0159846, 0.999265], + ], + ), + ( + [ + [0.77369, 0.0137861, 0.633415], + [-0.0186509, 0.999826, 0.00102049], + [-0.63329, -0.0126033, 0.773812], + ], + [ + [-0.773069, 0.0156632, -0.634129], + [0.00418312, 0.999799, 0.0195956], + [0.634308, 0.0124961, -0.772979], + ], + ), + ( + [ + [0.952827, -0.024521, -0.302522], + [-0.00541318, 0.9952, -0.0977158], + [0.303465, 0.0947439, 0.94812], + ], + [ + [-0.952266, -0.00806089, 0.305165], + [0.00351941, 0.999295, 0.037378], + [-0.305252, 0.0366678, -0.951567], + ], + ), + ( + [ + [-0.172189, 0.949971, 0.260587], + [-0.86961, -0.0223234, -0.493235], + [-0.462741, -0.311539, 0.829948], + ], + [ + [-0.672964, 0.0127645, -0.739567], + [0.00429523, 0.999902, 0.0133494], + [0.739664, 0.00580721, -0.672953], + ], + ), + ( + [ + [0.637899, -0.440017, 0.632036], + [-0.52883, 0.346333, 0.774849], + [-0.559842, -0.828516, -0.0117683], + ], + [ + [-0.0627307, -0.0314554, -0.997536], + [-0.733537, 0.679201, 0.0247117], + [0.67675, 0.733279, -0.0656804], + ], + ), + ( + [ + [0.998402, 0.00284932, -0.0564372], + [0.000393713, 0.998353, 0.0573683], + [0.0565077, -0.0572989, 0.996757], + 
], + [ + [-0.997878, 0.000941416, 0.0651252], + [-2.16756e-05, 0.999891, -0.0147853], + [-0.065132, -0.0147552, -0.997768], + ], + ), + ( + [ + [0.9999, 0.0141438, -0.000431687], + [-0.0140882, 0.9979, 0.063225], + [0.00132502, -0.0632125, 0.997999], + ], + [ + [-0.999515, -0.0308197, -0.00482715], + [-0.00160551, -0.103741, 0.994605], + [-0.0311554, 0.994128, 0.10364], + ], + ), + ( + [ + [-0.201909, 0.0267804, 0.979038], + [-0.0159062, 0.999405, -0.0306179], + [-0.979275, -0.0217548, -0.201363], + ], + [ + [0.261235, 0.951613, -0.161839], + [0.0758567, 0.146901, 0.986239], + [0.962292, -0.269916, -0.03381], + ], + ), + ( + [ + [0.998335, -0.0191576, -0.0544038], + [0.0163271, 0.998513, -0.0520045], + [0.0553192, 0.0510297, 0.997164], + ], + [ + [-0.998811, -0.00846127, 0.0480344], + [-0.0051736, 0.997661, 0.0681593], + [-0.0484988, 0.0678295, -0.996519], + ], + ), + ( + [ + [0.999973, 0.00227282, -0.00699658], + [-0.00137504, 0.992062, 0.125744], + [0.00722684, -0.125731, 0.992038], + ], + [ + [-0.999995, -0.00337061, 4.25756e-05], + [-0.00333677, 0.991528, 0.129853], + [-0.00047993, 0.129852, -0.991534], + ], + ), + ( + [ + [0.998908, 0.0216581, -0.041392], + [-0.0327304, 0.956678, -0.289302], + [0.0333331, 0.290341, 0.956342], + ], + [ + [-0.998254, -0.0377592, 0.0454422], + [0.00744647, 0.682591, 0.730764], + [-0.0586112, 0.729825, -0.681118], + ], + ), + ( + [ + [0.999387, -0.0042571, -0.0347599], + [0.00485203, 0.999843, 0.017049], + [0.0346819, -0.0172072, 0.99925], + ], + [ + [-0.999976, 0.00260242, -0.00669664], + [-0.00250352, -0.999889, -0.0147361], + [-0.00673422, -0.0147175, 0.99987], + ], + ), + ( + [ + [0.906103, -0.398828, -0.141112], + [0.381512, 0.914475, -0.13485], + [0.182826, 0.0683519, 0.980766], + ], + [ + [-0.996568, -0.0321282, -0.0763021], + [-0.0823787, 0.476597, 0.875254], + [0.00824509, 0.878535, -0.477609], + ], + ), + ( + [ + [0.908356, 0.316033, -0.273884], + [-0.231421, -0.165634, -0.95865], + [-0.34833, 0.934178, -0.0773183], + ], + [ + [-0.999889, -0.0146322, -0.00295739], + [-0.0149238, 0.974974, 0.221815], + [-0.000362257, 0.221835, -0.975085], + ], + ), + ( + [ + [0.999507, -0.00834631, 0.0302637], + [0.00899248, 0.999733, -0.0212785], + [-0.030078, 0.0215401, 0.999315], + ], + [ + [-0.999538, 0.00785187, -0.0293621], + [0.00739788, 0.999852, 0.0155394], + [0.0294797, 0.0153149, -0.999448], + ], + ), + ( + [ + [0.999951, -0.00729441, -0.00672921], + [0.00313753, 0.87564, -0.482954], + [0.00941523, 0.48291, 0.87562], + ], + [ + [-0.999984, -0.005202, -0.00277372], + [0.00340465, -0.893745, 0.448565], + [-0.00481353, 0.448548, 0.893747], + ], + ), + ( + [ + [0.998028, -0.0569885, 0.0263322], + [0.0489091, 0.968801, 0.242967], + [-0.039357, -0.2412, 0.969677], + ], + [ + [-0.997066, 0.0422415, -0.0638525], + [-0.0760293, -0.448184, 0.890703], + [0.00900662, 0.892944, 0.45008], + ], + ), + ( + [ + [0.999745, 0.00860777, 0.0208747], + [-0.00827114, 0.999835, -0.0161595], + [-0.0210103, 0.0159827, 0.999651], + ], + [ + [-0.999576, 0.0148733, -0.0251161], + [0.0151027, 0.999846, -0.00898035], + [0.0249787, -0.00935575, -0.999646], + ], + ), + ( + [ + [0.91924, 0.0372116, -0.391934], + [-0.00675798, 0.996868, 0.0787959], + [0.393639, -0.0697837, 0.916613], + ], + [ + [-0.921919, 0.00882585, 0.387286], + [0.00588498, 0.999944, -0.00877866], + [-0.387342, -0.00581387, -0.921919], + ], + ), + ( + [ + [0.998324, -0.0029024, 0.0577924], + [0.00236766, 0.999954, 0.00931901], + [-0.0578167, -0.00916657, 0.998285], + ], + [ + [-0.99892, -0.0025688, -0.0464413], + 
[-0.00203721, 0.999932, -0.0114927], + [0.0464676, -0.0113855, -0.998857], + ], + ), + ( + [ + [0.993986, 0.0163462, -0.108279], + [-0.0612924, 0.902447, -0.426418], + [0.090746, 0.43049, 0.898022], + ], + [ + [-0.994519, -0.0767804, 0.0709843], + [0.0579273, 0.160607, 0.985318], + [-0.0870543, 0.984028, -0.15528], + ], + ), + ( + [ + [0.997351, 0.0715122, -0.0132892], + [-0.0707087, 0.996067, 0.0533919], + [0.0170551, -0.0523108, 0.998485], + ], + [ + [-0.997704, -0.066002, 0.015281], + [0.064101, -0.846657, 0.528267], + [-0.0219278, 0.528033, 0.848942], + ], + ), + ( + [ + [0.999839, 0.00714662, -0.0164633], + [-0.00859425, 0.99594, -0.0896085], + [0.0157561, 0.0897356, 0.995841], + ], + [ + [-0.999773, 0.0079918, 0.0197854], + [0.00864136, 0.999419, 0.0329623], + [-0.0195105, 0.0331255, -0.999262], + ], + ), + ( + [ + [-0.773738, 0.630074, 0.0658454], + [-0.622848, -0.737618, -0.260731], + [-0.115711, -0.242749, 0.963163], + ], + [ + [-0.740005, 0.000855199, -0.672604], + [-0.0106008, 0.99986, 0.0129348], + [0.672521, 0.0167018, -0.739892], + ], + ), + ( + [ + [0.969039, -0.00110643, -0.246907], + [-0.121454, 0.868509, -0.480564], + [0.214973, 0.495673, 0.841484], + ], + [ + [-0.981168, -0.150714, 0.120811], + [0.172426, -0.401504, 0.89948], + [-0.0870583, 0.903372, 0.419929], + ], + ), + ( + [ + [0.589015, 0.80692, 0.0440651], + [-0.806467, 0.583447, 0.0959135], + [0.0516848, -0.0920316, 0.994414], + ], + [ + [-0.99998, 0.00434293, -0.00486489], + [0.00437139, 0.999973, -0.00588975], + [0.00483918, -0.00591087, -0.999972], + ], + ), + ( + [ + [0.999972, 0.000781564, 0.00750023], + [-0.0031568, 0.946655, 0.322235], + [-0.00684828, -0.322249, 0.94663], + ], + [ + [-0.999817, -0.0178453, -0.00691725], + [-0.0189272, 0.975556, 0.218934], + [0.00284118, 0.219025, -0.975716], + ], + ), + ( + [ + [-0.969668, 0.219101, -0.108345], + [0.172364, 0.298654, -0.938667], + [-0.173305, -0.928871, -0.32736], + ], + [ + [-0.999917, 0.0111423, -0.00656864], + [-0.00977865, -0.318874, 0.947748], + [0.00846644, 0.947733, 0.318955], + ], + ), + ( + [ + [-0.808574, -0.185515, -0.558383], + [0.174641, -0.981898, 0.0733309], + [-0.561879, -0.038223, 0.826336], + ], + [ + [-0.873416, 0.0121808, -0.486824], + [-0.00495714, 0.999413, 0.0338998], + [0.486951, 0.032022, -0.872843], + ], + ), + ( + [ + [0.999295, 0.0295658, -0.0231234], + [-0.0251771, 0.984904, 0.17126], + [0.0278378, -0.170557, 0.984954], + ], + [ + [-0.998834, -0.040128, 0.026921], + [0.0327412, -0.152276, 0.987798], + [-0.0355388, 0.987524, 0.153411], + ], + ), + ( + [ + [0.996021, -0.0050677, -0.0889802], + [0.0042919, 0.999951, -0.00890794], + [0.089021, 0.0084906, 0.995994], + ], + [ + [-0.995726, -0.00858132, 0.0919686], + [-0.00615004, 0.999625, 0.0266854], + [-0.0921631, 0.0260058, -0.995405], + ], + ), + ( + [ + [0.563325, 0.812296, 0.151129], + [-0.316559, 0.381143, -0.868632], + [-0.763188, 0.441481, 0.471847], + ], + [ + [-0.980048, -0.0115108, -0.198437], + [-0.168991, 0.573853, 0.801335], + [0.104649, 0.818877, -0.564348], + ], + ), + ( + [ + [0.984844, -0.0288271, 0.17103], + [0.0260588, 0.999491, 0.0184094], + [-0.171474, -0.0136736, 0.985094], + ], + [ + [-0.984637, -0.00367691, -0.174577], + [-0.00649229, 0.999858, 0.0155587], + [0.174495, 0.0164532, -0.984521], + ], + ), + ( + [ + [0.99985, 0.000720773, -0.0172841], + [-0.00075051, 0.999998, -0.0017141], + [0.0172828, 0.00172682, 0.999849], + ], + [ + [-0.999926, -0.00413456, 0.0114842], + [-0.00368343, 0.999231, 0.0390359], + [-0.0116368, 0.0389908, -0.999172], + ], + ), + ( + 
[ + [0.997976, 0.0603523, -0.0200139], + [-0.0558618, 0.982551, 0.177404], + [0.0303714, -0.175927, 0.983935], + ], + [ + [-0.996867, -0.0790953, 0.00217996], + [0.0318842, -0.376338, 0.925935], + [-0.0724181, 0.923101, 0.37768], + ], + ), + ( + [ + [0.94678, -0.00538407, -0.321837], + [0.00249113, 0.999953, -0.0094], + [0.321872, 0.008098, 0.946749], + ], + [ + [-0.945694, 0.0255694, 0.324053], + [0.0240377, 0.999673, -0.00872898], + [-0.32417, -0.000465377, -0.945999], + ], + ), + ( + [ + [0.846059, 0.435245, -0.307807], + [0.318073, 0.0512036, 0.946682], + [0.4278, -0.898855, -0.0951187], + ], + [ + [-0.217213, -0.0389124, 0.975352], + [0.742195, 0.642416, 0.190918], + [-0.634011, 0.765368, -0.11066], + ], + ), + ( + [ + [0.914988, -0.0538229, -0.399875], + [-0.0459455, 0.970717, -0.23579], + [0.400857, 0.234117, 0.885722], + ], + [ + [-0.919706, 0.00194642, 0.392606], + [0.105539, 0.964406, 0.242451], + [-0.378159, 0.264418, -0.887176], + ], + ), + ( + [ + [0.970915, -0.183858, 0.153365], + [0.209801, 0.96196, -0.174974], + [-0.115361, 0.202061, 0.972555], + ], + [ + [-0.975509, 0.21077, -0.0629391], + [-0.218082, -0.964089, 0.151576], + [-0.0287314, 0.161588, 0.986441], + ], + ), + ( + [ + [0.99369, -0.00515149, -0.112044], + [0.00366664, 0.999903, -0.0134545], + [0.112102, 0.0129588, 0.993612], + ], + [ + [-0.99406, 0.00631892, 0.108668], + [0.00878985, 0.999713, 0.022273], + [-0.108496, 0.0230956, -0.99383], + ], + ), + ( + [ + [0.995917, 0.0137529, 0.089215], + [-0.0145079, 0.999864, 0.00781912], + [-0.0890954, -0.00908151, 0.995982], + ], + [ + [-0.996188, 0.012059, -0.0864113], + [0.0126654, 0.999899, -0.00647346], + [0.0863245, -0.00754306, -0.99624], + ], + ), + ( + [ + [0.84563, -0.0032436, -0.533759], + [0.0040093, 0.999992, 0.000275049], + [0.533754, -0.00237259, 0.845636], + ], + [ + [-0.849818, -0.00755214, 0.527023], + [-0.00734806, 0.99997, 0.00248074], + [-0.527026, -0.00176415, -0.849848], + ], + ), + ( + [ + [0.736067, -0.212675, -0.642631], + [-0.447028, 0.560168, -0.697408], + [0.508303, 0.800613, 0.31725], + ], + [ + [-0.684029, 0.0061039, 0.729431], + [0.0260275, 0.999532, 0.0160434], + [-0.728992, 0.0299595, -0.683868], + ], + ), + ( + [ + [0.993949, 0.00461705, -0.109742], + [-0.00653155, 0.999833, -0.0170925], + [0.109644, 0.0177058, 0.993813], + ], + [ + [-0.994446, 0.0218439, 0.102965], + [0.0227578, 0.999711, 0.00770966], + [-0.102767, 0.0100102, -0.994656], + ], + ), + ( + [ + [0.996005, -0.0103388, 0.0886959], + [-0.0291635, 0.901147, 0.432531], + [-0.0843999, -0.43339, 0.897246], + ], + [ + [-0.999947, 0.00833193, -0.00598923], + [-0.0101526, -0.887864, 0.459993], + [-0.00148526, 0.46003, 0.887902], + ], + ), + ( + [ + [0.981518, 0.0114609, 0.191025], + [-0.0104683, 0.999926, -0.00620422], + [-0.191082, 0.00408984, 0.981565], + ], + [ + [-0.979556, 0.000134379, -0.201176], + [-0.00817302, 0.999148, 0.0404628], + [0.20101, 0.0412799, -0.97872], + ], + ), + ( + [ + [0.997665, -0.0372296, -0.0572574], + [0.0379027, 0.999224, 0.0107148], + [0.0568141, -0.01286, 0.998302], + ], + [ + [-0.997794, 0.00389749, 0.0662921], + [0.00639122, 0.999278, 0.0374446], + [-0.0660983, 0.0377856, -0.997099], + ], + ), + ( + [ + [0.981618, -0.0105643, -0.190564], + [0.00329498, 0.999256, -0.0384229], + [0.190828, 0.0370887, 0.980923], + ], + [ + [-0.981673, -0.000810695, 0.190576], + [0.00398375, 0.999685, 0.0247729], + [-0.190536, 0.0250779, -0.981361], + ], + ), + ( + [ + [-0.544941, -0.812151, -0.208446], + [0.812337, -0.449791, -0.37121], + [0.207722, -0.371617, 0.90485], 
+ ], + [ + [-0.121327, -0.000366672, -0.992614], + [-0.955208, 0.271977, 0.116655], + [0.269926, 0.962303, -0.0333484], + ], + ), + ( + [ + [0.637701, -0.219537, 0.738336], + [0.735715, 0.457522, -0.499397], + [-0.228168, 0.861671, 0.453279], + ], + [ + [-0.741797, 0.0196167, -0.670339], + [-0.00209087, 0.9995, 0.0315629], + [0.670623, 0.0248149, -0.741385], + ], + ), + ( + [ + [0.99813, -0.0590625, -0.0157485], + [0.0589086, 0.998213, -0.0100649], + [0.0163148, 0.00911833, 0.999825], + ], + [ + [-0.99893, 0.0258783, -0.0383385], + [-0.0440455, -0.279068, 0.959261], + [0.014125, 0.959924, 0.279908], + ], + ), + ( + [ + [0.999558, 0.0028395, -0.0296019], + [-0.00492321, 0.997496, -0.0705578], + [0.0293274, 0.0706723, 0.997068], + ], + [ + [-0.999532, -0.0305627, -0.00231546], + [0.00957406, -0.38309, 0.923664], + [-0.0291167, 0.923206, 0.383202], + ], + ), + ( + [ + [0.99814, -0.0528437, -0.0303853], + [0.0590889, 0.96123, 0.269341], + [0.0149743, -0.270636, 0.962565], + ], + [ + [-0.999464, 0.00781117, 0.0318024], + [-0.000588355, 0.966696, -0.255928], + [-0.0327423, -0.255809, -0.966173], + ], + ), + ( + [ + [-0.936685, 0.234194, 0.260336], + [-0.233325, -0.97178, 0.034698], + [0.261116, -0.0282419, 0.964894], + ], + [ + [0.999511, 0.00582072, 0.0307461], + [0.0289012, 0.204922, -0.978352], + [-0.0119956, 0.978762, 0.204654], + ], + ), + ( + [ + [0.973616, -0.019218, -0.227384], + [0.0030011, 0.99744, -0.0714512], + [0.228175, 0.0688836, 0.97118], + ], + [ + [-0.974738, 0.0190271, 0.222547], + [0.0222378, 0.999682, 0.0119297], + [-0.222249, 0.0165771, -0.97485], + ], + ), + ( + [ + [0.997273, 0.0453471, -0.0582173], + [-0.0234007, 0.942529, 0.333303], + [0.0699858, -0.331032, 0.941021], + ], + [ + [-0.996269, -0.0613496, 0.0607196], + [-0.0100285, 0.780948, 0.624516], + [-0.0857328, 0.621576, -0.77865], + ], + ), + ( + [ + [0.999511, 0.0274482, -0.0149865], + [-0.0305945, 0.957511, -0.286769], + [0.00647846, 0.287087, 0.957883], + ], + [ + [-0.999443, -0.0260559, 0.0209038], + [0.0148505, 0.213942, 0.976734], + [-0.0299225, 0.976499, -0.213437], + ], + ), + ( + [ + [0.621123, 0.722893, 0.302708], + [-0.48353, 0.657448, -0.577894], + [-0.61677, 0.212574, 0.757896], + ], + [ + [-0.996888, -0.0217614, -0.0757776], + [-0.0783897, 0.376159, 0.923234], + [0.00841386, 0.926299, -0.376694], + ], + ), + ( + [ + [0.974426, 0.0128384, -0.224341], + [-0.0123842, 0.999917, 0.00343166], + [0.224367, -0.00056561, 0.974505], + ], + [ + [-0.973234, -0.00506667, 0.229763], + [-0.000498848, 0.999801, 0.0199346], + [-0.229818, 0.0192865, -0.973043], + ], + ), + ( + [ + [0.994721, -0.0881097, 0.0526082], + [0.0972904, 0.972774, -0.210345], + [-0.0326424, 0.214353, 0.976211], + ], + [ + [-0.994309, 0.0920529, -0.0536268], + [-0.105538, -0.782431, 0.613729], + [0.0145358, 0.615896, 0.787694], + ], + ), + ( + [ + [0.998677, -0.0372894, 0.0354002], + [0.0242326, 0.948589, 0.315583], + [-0.0453481, -0.314308, 0.948237], + ], + [ + [-0.999066, -0.00910724, -0.0422707], + [-0.024629, 0.923353, 0.383161], + [0.0355411, 0.383844, -0.922715], + ], + ), + ( + [ + [0.931525, 0.00831028, 0.363583], + [0.0192806, 0.997204, -0.0721909], + [-0.363167, 0.0742577, 0.92876], + ], + [ + [-0.930052, -0.00174384, -0.367425], + [-0.0268673, 0.997634, 0.0632737], + [0.366445, 0.0687194, -0.927899], + ], + ), + ( + [ + [-0.50483, -0.819216, 0.272087], + [0.775688, -0.568816, -0.273414], + [0.378753, 0.0730272, 0.922612], + ], + [ + [-0.981596, 0.00031926, 0.190974], + [0.00652401, 0.999471, 0.0318616], + [-0.190863, 0.0325211, 
-0.981079], + ], + ), + ( + [ + [0.990518, -0.00195099, -0.137368], + [-0.00164696, 0.999659, -0.0260735], + [0.137372, 0.0260526, 0.990177], + ], + [ + [-0.991078, 0.00934835, 0.132961], + [0.0106057, 0.999905, 0.00875176], + [-0.132866, 0.0100839, -0.991083], + ], + ), + ( + [ + [0.935049, -0.353081, 0.0318997], + [0.257018, 0.737114, 0.624984], + [-0.244184, -0.576192, 0.779985], + ], + [ + [-0.977342, -0.00167896, -0.211667], + [-0.0448634, 0.978894, 0.199386], + [0.206864, 0.204364, -0.956789], + ], + ), + ( + [ + [0.998464, 0.0501172, 0.0236119], + [-0.0498618, 0.998692, -0.0112844], + [-0.0241466, 0.0100898, 0.999658], + ], + [ + [-0.999931, -0.0037971, -0.0112195], + [-0.00640916, 0.970027, 0.242913], + [0.00996085, 0.242968, -0.969984], + ], + ), + ( + [ + [0.999893, -0.0108217, 0.00984537], + [0.011201, 0.999164, -0.0393194], + [-0.00941164, 0.0394255, 0.999178], + ], + [ + [-0.999886, 0.00730461, -0.0133396], + [-0.0118202, -0.925163, 0.379391], + [-0.00956982, 0.379504, 0.925142], + ], + ), + ( + [ + [0.990922, -0.086745, 0.102709], + [0.0847349, 0.99612, 0.0237834], + [-0.104373, -0.0148644, 0.994427], + ], + [ + [-0.994922, -0.00197458, -0.10064], + [-0.00242513, 0.999988, 0.00435525], + [0.10063, 0.00457739, -0.994914], + ], + ), + ( + [ + [0.999856, 0.00210734, -0.0168511], + [-0.00557165, 0.978053, -0.20828], + [0.0160424, 0.208344, 0.977924], + ], + [ + [-0.999698, 0.0048691, 0.0241226], + [-0.00154306, 0.965899, -0.258915], + [-0.0245606, -0.258874, -0.9656], + ], + ), + ( + [ + [0.992858, -0.0249864, -0.116659], + [0.0419872, 0.988447, 0.145634], + [0.111673, -0.149492, 0.982436], + ], + [ + [-0.992324, 0.0357741, 0.118384], + [-0.0419528, 0.803113, -0.594348], + [-0.116338, -0.594752, -0.795447], + ], + ), + ( + [ + [0.986821, -0.00531913, 0.161729], + [0.00797365, 0.999844, -0.0157688], + [-0.16162, 0.0168505, 0.986709], + ], + [ + [-0.985867, 0.0119402, -0.167109], + [0.0141227, 0.99983, -0.0118784], + [0.166939, -0.0140704, -0.985868], + ], + ), + ( + [ + [0.999693, -0.0158939, -0.0190113], + [0.0103599, 0.96501, -0.262007], + [0.0225104, 0.261729, 0.964879], + ], + [ + [-0.999344, -0.0314781, -0.0180051], + [-0.0250895, 0.241673, 0.970034], + [-0.0261833, 0.969847, -0.242305], + ], + ), + ( + [ + [0.977445, 0.0293661, 0.209138], + [-0.0723687, 0.976903, 0.201057], + [-0.198403, -0.211657, 0.956994], + ], + [ + [-0.976437, 0.00895131, -0.215624], + [0.0552894, 0.976169, -0.20985], + [0.208607, -0.216827, -0.953663], + ], + ), + ( + [ + [0.994593, 0.0974797, -0.0358119], + [-0.0822288, 0.949838, 0.301737], + [0.0634288, -0.297161, 0.952718], + ], + [ + [-0.994192, -0.10746, -0.00604622], + [0.078812, -0.7651, 0.639071], + [-0.0733003, 0.634882, 0.769124], + ], + ), + ( + [ + [0.365674, 0.282077, -0.88697], + [-0.609177, 0.793033, 0.00105565], + [0.703694, 0.539936, 0.461826], + ], + [ + [-0.469534, 0.0109062, 0.882848], + [0.0060785, 0.99994, -0.00911984], + [-0.882894, 0.00108445, -0.469572], + ], + ), + ( + [ + [0.999956, 0.00903085, 0.0025358], + [-0.00862738, 0.991574, -0.129252], + [-0.00368169, 0.129224, 0.991609], + ], + [ + [-0.999976, 0.00322491, -0.00637541], + [0.00379751, 0.995755, -0.0919687], + [0.00605176, -0.0919907, -0.995743], + ], + ), + ( + [ + [0.999982, -0.00398882, -0.00441072], + [0.00411881, 0.999545, 0.0298655], + [0.00428959, -0.0298832, 0.999544], + ], + [ + [-0.999931, -0.00315547, -0.0114491], + [0.00300966, -0.999914, 0.0128304], + [-0.0114875, 0.012796, 0.999853], + ], + ), + ( + [ + [0.996613, 0.0781452, -0.0256245], + [-0.0610516, 
0.91178, 0.406116], + [0.0550999, -0.403175, 0.913462], + ], + [ + [-0.996368, -0.084671, 0.00909851], + [0.0540149, -0.545774, 0.83619], + [-0.0658352, 0.833644, 0.548365], + ], + ), + ( + [ + [0.961059, 0.139318, 0.238654], + [-0.117488, 0.987672, -0.103448], + [-0.250124, 0.0713812, 0.965579], + ], + [ + [-0.973397, 0.00782581, -0.228998], + [-0.0621109, 0.952986, 0.296581], + [0.220553, 0.302913, -0.927147], + ], + ), + ( + [ + [0.156415, -0.982138, 0.104589], + [-0.568896, -0.176149, -0.803323], + [0.807398, 0.0661518, -0.586287], + ], + [ + [-0.992155, 0.0934304, -0.0830664], + [-0.121171, -0.555137, 0.822887], + [0.0307694, 0.826496, 0.562102], + ], + ), + ( + [ + [0.997973, 0.0130328, -0.0622976], + [-0.011111, 0.999455, 0.0310968], + [0.0626689, -0.0303416, 0.997573], + ], + [ + [-0.997391, -0.00094697, 0.0722014], + [-0.00271076, 0.9997, -0.024334], + [-0.0721567, -0.024466, -0.997094], + ], + ), + ( + [ + [0.913504, -0.0125928, -0.406634], + [-0.108363, 0.95588, -0.27304], + [0.392132, 0.293487, 0.871836], + ], + [ + [-0.909813, 0.0115348, 0.414861], + [0.128636, 0.958223, 0.255464], + [-0.394582, 0.28579, -0.873287], + ], + ), + ( + [ + [0.932595, -0.0693644, 0.354197], + [0.0984415, 0.993036, -0.0647231], + [-0.347241, 0.0952281, 0.932928], + ], + [ + [-0.930498, 0.00578599, -0.366252], + [-0.106202, 0.952666, 0.284867], + [0.350564, 0.303964, -0.885839], + ], + ), + ( + [ + [0.995668, -0.00475737, 0.0928567], + [0.00890154, 0.99898, -0.0442667], + [-0.0925514, 0.0449015, 0.994695], + ], + [ + [-0.996077, -0.0107986, -0.0878355], + [0.00749423, 0.978669, -0.205305], + [0.0881789, -0.205158, -0.974749], + ], + ), + ( + [ + [0.99948, 0.0321999, 0.00146151], + [-0.0321302, 0.998886, -0.0345513], + [-0.00257243, 0.0344864, 0.999402], + ], + [ + [-0.999953, 0.00726142, -0.0065326], + [0.00488529, 0.950962, 0.30927], + [0.00845801, 0.309223, -0.950953], + ], + ), +] + + +class TestRot3(GtsamTestCase): + """Test selected Rot3 methods.""" + + def test_axisangle(self) -> None: + """Test .axisAngle() method.""" + # fmt: off + R = np.array( + [ + [ -0.999957, 0.00922903, 0.00203116], + [ 0.00926964, 0.999739, 0.0208927], + [ -0.0018374, 0.0209105, -0.999781] + ]) + # fmt: on + + # get back angle in radians + _, actual_angle = Rot3(R).axisAngle() + expected_angle = 3.1396582 + np.testing.assert_almost_equal(actual_angle, expected_angle, 1e-7) + + def test_axis_angle_stress_test(self) -> None: + """Test that .axisAngle() yields angles less than 180 degrees for specific inputs.""" + for (R1, R2) in R1_R2_pairs: + R1 = Rot3(np.array(R1)) + R2 = Rot3(np.array(R2)) + + i1Ri2 = R1.between(R2) + + axis, angle = i1Ri2.axisAngle() + angle_deg = np.rad2deg(angle) + assert angle_deg < 180 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam/tests/test_lago.py b/python/gtsam/tests/test_lago.py new file mode 100644 index 000000000..8ed5dd943 --- /dev/null +++ b/python/gtsam/tests/test_lago.py @@ -0,0 +1,38 @@ +""" +GTSAM Copyright 2010, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) +See LICENSE for the license information + +Author: John Lambert (Python) +""" + +import unittest + +import numpy as np + +import gtsam +from gtsam import Point3, Pose2, PriorFactorPose2, Values + + +class TestLago(unittest.TestCase): + """Test selected LAGO methods.""" + + def test_initialize(self) -> None: + """Smokescreen to ensure LAGO can be imported and run on toy data stored in a g2o file.""" + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") + + graph = gtsam.NonlinearFactorGraph() + graph, initial = gtsam.readG2o(g2oFile) + + # Add prior on the pose having index (key) = 0 + priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8)) + graph.add(PriorFactorPose2(0, Pose2(), priorModel)) + + estimateLago: Values = gtsam.lago.initialize(graph) + assert isinstance(estimateLago, Values) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index 66dbed1eb..75425a0cd 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -606,7 +606,7 @@ Vector3 f(const Point2& a, const Vector3& b, OptionalJacobian<3, 2> H1, if (H1) *H1 << b.y(), b.z(), b.x(), 0, 0, 0; if (H2) *H2 = A; return A * b; -}; +} } TEST(ExpressionFactor, MultiplyWithInverseFunction) { diff --git a/tests/testLie.cpp b/tests/testLie.cpp index 0ef12198b..fe1173f22 100644 --- a/tests/testLie.cpp +++ b/tests/testLie.cpp @@ -129,6 +129,46 @@ TEST( testProduct, Logmap ) { EXPECT(assert_equal(numericH, actH, tol)); } +/* ************************************************************************* */ +Product interpolate_proxy(const Product& x, const Product& y, double t) { + return interpolate(x, y, t); +} + +TEST(Lie, Interpolate) { + Product x(Point2(1, 2), Pose2(3, 4, 5)); + Product y(Point2(6, 7), Pose2(8, 9, 0)); + + double t; + Matrix actH1, numericH1, actH2, numericH2; + + t = 0.0; + interpolate(x, y, t, actH1, actH2); + numericH1 = numericalDerivative31<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH1, actH1, tol)); + numericH2 = numericalDerivative32<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH2, actH2, tol)); + + t = 0.5; + interpolate(x, y, t, actH1, actH2); + numericH1 = numericalDerivative31<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH1, actH1, tol)); + numericH2 = numericalDerivative32<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH2, actH2, tol)); + + t = 1.0; + interpolate(x, y, t, actH1, actH2); + numericH1 = numericalDerivative31<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH1, actH1, tol)); + numericH2 = numericalDerivative32<Product, Product, Product, double>( + interpolate_proxy, x, y, t); + EXPECT(assert_equal(numericH2, actH2, tol)); +} + //****************************************************************************** int main() { TestResult tr; diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp index 84bba850b..67a23355d 100644 --- a/tests/testNonlinearFactor.cpp +++ b/tests/testNonlinearFactor.cpp @@ -101,6 +101,82 @@ TEST( NonlinearFactor, NonlinearFactor ) DOUBLES_EQUAL(expected,actual,0.00000001); } +/* ************************************************************************* */ +TEST(NonlinearFactor, Weight) { + // create a values structure for the non linear factor graph + Values values; + + // Instantiate a concrete class version of a NoiseModelFactor + PriorFactor<Point2> factor1(X(1), Point2(0, 0)); + values.insert(X(1), Point2(0.1, 0.1)); + + CHECK(assert_equal(1.0,
factor1.weight(values))); + + // Factor with noise model + auto noise = noiseModel::Isotropic::Sigma(2, 0.2); + PriorFactor<Point2> factor2(X(2), Point2(1, 1), noise); + values.insert(X(2), Point2(1.1, 1.1)); + + CHECK(assert_equal(1.0, factor2.weight(values))); + + Point2 estimate(3, 3), prior(1, 1); + double distance = (estimate - prior).norm(); + + auto gaussian = noiseModel::Isotropic::Sigma(2, 0.2); + + PriorFactor<Point2> factor; + + // vector to store all the robust models in so we can test iteratively. + vector<noiseModel::Robust::shared_ptr> robust_models; + + // Fair noise model + auto fair = noiseModel::Robust::Create( + noiseModel::mEstimator::Fair::Create(1.3998), gaussian); + robust_models.push_back(fair); + + // Huber noise model + auto huber = noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(1.345), gaussian); + robust_models.push_back(huber); + + // Cauchy noise model + auto cauchy = noiseModel::Robust::Create( + noiseModel::mEstimator::Cauchy::Create(0.1), gaussian); + robust_models.push_back(cauchy); + + // Tukey noise model + auto tukey = noiseModel::Robust::Create( + noiseModel::mEstimator::Tukey::Create(4.6851), gaussian); + robust_models.push_back(tukey); + + // Welsch noise model + auto welsch = noiseModel::Robust::Create( + noiseModel::mEstimator::Welsch::Create(2.9846), gaussian); + robust_models.push_back(welsch); + + // Geman-McClure noise model + auto gm = noiseModel::Robust::Create( + noiseModel::mEstimator::GemanMcClure::Create(1.0), gaussian); + robust_models.push_back(gm); + + // DCS noise model + auto dcs = noiseModel::Robust::Create( + noiseModel::mEstimator::DCS::Create(1.0), gaussian); + robust_models.push_back(dcs); + + // L2WithDeadZone noise model + auto l2 = noiseModel::Robust::Create( + noiseModel::mEstimator::L2WithDeadZone::Create(1.0), gaussian); + robust_models.push_back(l2); + + for(auto&& model: robust_models) { + factor = PriorFactor<Point2>(X(3), prior, model); + values.clear(); + values.insert(X(3), estimate); + CHECK(assert_equal(model->robust()->weight(distance), factor.weight(values))); + } +} + /* ************************************************************************* */ TEST( NonlinearFactor, linearize_f1 ) { diff --git a/tests/testSerializationSLAM.cpp b/tests/testSerializationSLAM.cpp index 2e99aff71..496122419 100644 --- a/tests/testSerializationSLAM.cpp +++ b/tests/testSerializationSLAM.cpp @@ -114,94 +114,94 @@ using symbol_shorthand::L; /* Create GUIDs for Noisemodels */ /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber"); -BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey"); +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber") +BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey") -BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); -BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); +BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel") +BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal") /* Create GUIDs for geometry */ /* ************************************************************************* */ -GTSAM_VALUE_EXPORT(gtsam::Point2); -GTSAM_VALUE_EXPORT(gtsam::StereoPoint2); -GTSAM_VALUE_EXPORT(gtsam::Point3); -GTSAM_VALUE_EXPORT(gtsam::Rot2); -GTSAM_VALUE_EXPORT(gtsam::Rot3); -GTSAM_VALUE_EXPORT(gtsam::Pose2); -GTSAM_VALUE_EXPORT(gtsam::Pose3); -GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); -GTSAM_VALUE_EXPORT(gtsam::Cal3DS2); -GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo); -GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera); -GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2); -GTSAM_VALUE_EXPORT(gtsam::StereoCamera); +GTSAM_VALUE_EXPORT(gtsam::Point2) +GTSAM_VALUE_EXPORT(gtsam::StereoPoint2) +GTSAM_VALUE_EXPORT(gtsam::Point3) +GTSAM_VALUE_EXPORT(gtsam::Rot2) +GTSAM_VALUE_EXPORT(gtsam::Rot3) +GTSAM_VALUE_EXPORT(gtsam::Pose2) +GTSAM_VALUE_EXPORT(gtsam::Pose3) +GTSAM_VALUE_EXPORT(gtsam::Cal3_S2) +GTSAM_VALUE_EXPORT(gtsam::Cal3DS2) +GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo) +GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera) +GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2) +GTSAM_VALUE_EXPORT(gtsam::StereoCamera) /* Create GUIDs for factors */ /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor"); -BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor"); +BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor") +BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor") -BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3"); -BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3"); -BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3"); -BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2"); -BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, 
"gtsam::PriorFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera"); +BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2") +BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2") +BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3") +BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2") +BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3") +BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2") +BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3") +BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2") +BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2") +BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera") +BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera") -BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3"); +BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2") +BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3") +BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2") +BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3") +BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2") +BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3") -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera"); +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, 
"gtsam::NonlinearEqualityCal3DS2") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera") +BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera") -BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D"); -BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D"); -BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2"); -BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3"); -BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint"); -BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point"); -BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2"); +BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D") +BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D") +BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2") +BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3") +BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint") +BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point") +BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera") +BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2") -BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D"); +BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D") -BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2"); -BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2"); +BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2") +BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2") -BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2"); -BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2"); +BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2") +BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2") -BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2"); +BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2") -BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D"); +BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D") /* ************************************************************************* */ diff --git a/tests/testSubgraphPreconditioner.cpp b/tests/testSubgraphPreconditioner.cpp index fb9f7a5a2..84ccc131a 100644 --- a/tests/testSubgraphPreconditioner.cpp +++ b/tests/testSubgraphPreconditioner.cpp @@ -200,7 +200,7 @@ TEST(SubgraphPreconditioner, system) { } /* ************************************************************************* */ -BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor"); +BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor") // Read from XML file static GaussianFactorGraph read(const string& name) { diff --git a/wrap/.github/workflows/macos-ci.yml b/wrap/.github/workflows/macos-ci.yml index 
3910d28d8..8119a3acb 100644 --- a/wrap/.github/workflows/macos-ci.yml +++ b/wrap/.github/workflows/macos-ci.yml @@ -27,10 +27,12 @@ jobs: - name: Python Dependencies run: | + pip3 install -U pip setuptools pip3 install -r requirements.txt - name: Build and Test run: | + # Build cmake . cd tests # Use Pytest to run all the tests. diff --git a/wrap/cmake/PybindWrap.cmake b/wrap/cmake/PybindWrap.cmake index f341c2f98..2149c7195 100644 --- a/wrap/cmake/PybindWrap.cmake +++ b/wrap/cmake/PybindWrap.cmake @@ -192,9 +192,9 @@ function(install_python_files source_files dest_directory) endfunction() # ~~~ -# https://stackoverflow.com/questions/13959434/cmake-out-of-source-build-python-files +# Copy over the directory from source_folder to dest_foler # ~~~ -function(create_symlinks source_folder dest_folder) +function(copy_directory source_folder dest_folder) if(${source_folder} STREQUAL ${dest_folder}) return() endif() @@ -215,31 +215,13 @@ function(create_symlinks source_folder dest_folder) # Create REAL folder file(MAKE_DIRECTORY "${dest_folder}") - # Delete symlink if it exists + # Delete if it exists file(REMOVE "${dest_folder}/${path_file}") - # Get OS dependent path to use in `execute_process` - file(TO_NATIVE_PATH "${dest_folder}/${path_file}" link) + # Get OS dependent path to use in copy file(TO_NATIVE_PATH "${source_folder}/${path_file}" target) - # cmake-format: off - if(UNIX) - set(command ln -s ${target} ${link}) - else() - set(command cmd.exe /c mklink ${link} ${target}) - endif() - # cmake-format: on - - execute_process( - COMMAND ${command} - RESULT_VARIABLE result - ERROR_VARIABLE output) - - if(NOT ${result} EQUAL 0) - message( - FATAL_ERROR - "Could not create symbolic link for: ${target} --> ${output}") - endif() + file(COPY ${target} DESTINATION ${dest_folder}) endforeach(path_file) -endfunction(create_symlinks) +endfunction(copy_directory) diff --git a/wrap/gtwrap/interface_parser/classes.py b/wrap/gtwrap/interface_parser/classes.py index 841c963c2..54beb86c1 100644 --- a/wrap/gtwrap/interface_parser/classes.py +++ b/wrap/gtwrap/interface_parser/classes.py @@ -62,6 +62,10 @@ class Method: self.parent = parent + def to_cpp(self) -> str: + """Generate the C++ code for wrapping.""" + return self.name + def __repr__(self) -> str: return "Method: {} {} {}({}){}".format( self.template, @@ -84,7 +88,8 @@ class StaticMethod: ``` """ rule = ( - STATIC # + Optional(Template.rule("template")) # + + STATIC # + ReturnType.rule("return_type") # + IDENT("name") # + LPAREN # @@ -92,16 +97,18 @@ class StaticMethod: + RPAREN # + SEMI_COLON # BR ).setParseAction( - lambda t: StaticMethod(t.name, t.return_type, t.args_list)) + lambda t: StaticMethod(t.name, t.return_type, t.args_list, t.template)) def __init__(self, name: str, return_type: ReturnType, args: ArgumentList, + template: Union[Template, Any] = None, parent: Union["Class", Any] = ''): self.name = name self.return_type = return_type self.args = args + self.template = template self.parent = parent @@ -221,8 +228,8 @@ class Class: Rule for all the members within a class. """ rule = ZeroOrMore(Constructor.rule # - ^ StaticMethod.rule # ^ Method.rule # + ^ StaticMethod.rule # ^ Variable.rule # ^ Operator.rule # ^ Enum.rule # diff --git a/wrap/gtwrap/interface_parser/type.py b/wrap/gtwrap/interface_parser/type.py index 49315cc56..e94db4ff2 100644 --- a/wrap/gtwrap/interface_parser/type.py +++ b/wrap/gtwrap/interface_parser/type.py @@ -158,6 +158,8 @@ class Type: """ Parsed datatype, can be either a fundamental type or a custom datatype. E.g. 
void, double, size_t, Matrix. + Think of this as a high-level type which encodes the typename and other + characteristics of the type. The type can optionally be a raw pointer, shared pointer or reference. Can also be optionally qualified with a `const`, e.g. `const int`. @@ -240,6 +242,9 @@ class Type: or self.typename.name in ["Matrix", "Vector"]) else "", typename=typename)) + def get_typename(self): + """Convenience method to get the typename of this type.""" + return self.typename.name class TemplatedType: """ diff --git a/wrap/gtwrap/matlab_wrapper/mixins.py b/wrap/gtwrap/matlab_wrapper/mixins.py index 217801ff3..2d7c75b39 100644 --- a/wrap/gtwrap/matlab_wrapper/mixins.py +++ b/wrap/gtwrap/matlab_wrapper/mixins.py @@ -112,7 +112,7 @@ class FormatMixin: if separator == "::": # C++ templates = [] - for idx in range(len(type_name.instantiations)): + for idx, _ in enumerate(type_name.instantiations): template = '{}'.format( self._format_type_name(type_name.instantiations[idx], include_namespace=include_namespace, @@ -124,7 +124,7 @@ class FormatMixin: formatted_type_name += '<{}>'.format(','.join(templates)) else: - for idx in range(len(type_name.instantiations)): + for idx, _ in enumerate(type_name.instantiations): formatted_type_name += '{}'.format( self._format_type_name(type_name.instantiations[idx], separator=separator, diff --git a/wrap/gtwrap/pybind_wrapper.py b/wrap/gtwrap/pybind_wrapper.py index 40571263a..809c69b56 100755 --- a/wrap/gtwrap/pybind_wrapper.py +++ b/wrap/gtwrap/pybind_wrapper.py @@ -119,8 +119,11 @@ class PybindWrapper: if py_method in self.python_keywords: py_method = py_method + "_" - is_method = isinstance(method, instantiator.InstantiatedMethod) - is_static = isinstance(method, parser.StaticMethod) + is_method = isinstance( + method, (parser.Method, instantiator.InstantiatedMethod)) + is_static = isinstance( + method, + (parser.StaticMethod, instantiator.InstantiatedStaticMethod)) return_void = method.return_type.is_void() args_names = method.args.names() py_args_names = self._py_args_names(method.args) diff --git a/wrap/gtwrap/template_instantiator.py b/wrap/gtwrap/template_instantiator.py deleted file mode 100644 index b4d79655d..000000000 --- a/wrap/gtwrap/template_instantiator.py +++ /dev/null @@ -1,714 +0,0 @@ -"""Code to help instantiate templated classes, methods and functions.""" - -# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, too-many-arguments, unused-format-string-argument, unused-variable - -import itertools -from copy import deepcopy -from typing import Any, Iterable, List, Sequence - -import gtwrap.interface_parser as parser - - -def is_scoped_template(template_typenames, str_arg_typename): - """ - Check if the template given by `str_arg_typename` is a scoped template, - and if so, return what template and index matches the scoped template correctly. - """ - for idx, template in enumerate(template_typenames): - if template in str_arg_typename.split("::"): - return template, idx - return False, -1 - - -def instantiate_type(ctype: parser.Type, - template_typenames: List[str], - instantiations: List[parser.Typename], - cpp_typename: parser.Typename, - instantiated_class=None): - """ - Instantiate template typename for @p ctype. - - Args: - instiated_class (InstantiatedClass): - - @return If ctype's name is in the @p template_typenames, return the - corresponding type to replace in @p instantiations. - If ctype name is `This`, return the new typename @p `cpp_typename`. 
- Otherwise, return the original ctype. - """ - # make a deep copy so that there is no overwriting of original template params - ctype = deepcopy(ctype) - - # Check if the return type has template parameters - if ctype.typename.instantiations: - for idx, instantiation in enumerate(ctype.typename.instantiations): - if instantiation.name in template_typenames: - template_idx = template_typenames.index(instantiation.name) - ctype.typename.instantiations[ - idx] = instantiations[ # type: ignore - template_idx] - - return ctype - - str_arg_typename = str(ctype.typename) - - # Check if template is a scoped template e.g. T::Value where T is the template - scoped_template, scoped_idx = is_scoped_template(template_typenames, - str_arg_typename) - - # Instantiate templates which have enumerated instantiations in the template. - # E.g. `template`. - - # Instantiate scoped templates, e.g. T::Value. - if scoped_template: - # Create a copy of the instantiation so we can modify it. - instantiation = deepcopy(instantiations[scoped_idx]) - # Replace the part of the template with the instantiation - instantiation.name = str_arg_typename.replace(scoped_template, - instantiation.name) - return parser.Type( - typename=instantiation, - is_const=ctype.is_const, - is_shared_ptr=ctype.is_shared_ptr, - is_ptr=ctype.is_ptr, - is_ref=ctype.is_ref, - is_basic=ctype.is_basic, - ) - # Check for exact template match. - elif str_arg_typename in template_typenames: - idx = template_typenames.index(str_arg_typename) - return parser.Type( - typename=instantiations[idx], - is_const=ctype.is_const, - is_shared_ptr=ctype.is_shared_ptr, - is_ptr=ctype.is_ptr, - is_ref=ctype.is_ref, - is_basic=ctype.is_basic, - ) - - # If a method has the keyword `This`, we replace it with the (instantiated) class. - elif str_arg_typename == 'This': - # Check if the class is template instantiated - # so we can replace it with the instantiated version. - if instantiated_class: - name = instantiated_class.original.name - namespaces_name = instantiated_class.namespaces() - namespaces_name.append(name) - cpp_typename = parser.Typename( - namespaces_name, - instantiations=instantiated_class.instantiations) - - return parser.Type( - typename=cpp_typename, - is_const=ctype.is_const, - is_shared_ptr=ctype.is_shared_ptr, - is_ptr=ctype.is_ptr, - is_ref=ctype.is_ref, - is_basic=ctype.is_basic, - ) - - # Case when 'This' is present in the type namespace, e.g `This::Subclass`. - elif 'This' in str_arg_typename: - # Simply get the index of `This` in the namespace and replace it with the instantiated name. - namespace_idx = ctype.typename.namespaces.index('This') - ctype.typename.namespaces[namespace_idx] = cpp_typename.name - return ctype - - else: - return ctype - - -def instantiate_args_list(args_list, template_typenames, instantiations, - cpp_typename): - """ - Instantiate template typenames in an argument list. - Type with name `This` will be replaced by @p `cpp_typename`. - - @param[in] args_list A list of `parser.Argument` to instantiate. - @param[in] template_typenames List of template typenames to instantiate, - e.g. ['T', 'U', 'V']. - @param[in] instantiations List of specific types to instantiate, each - associated with each template typename. Each type is a parser.Typename, - including its name and full namespaces. - @param[in] cpp_typename Full-namespace cpp class name of this instantiation - to replace for arguments of type named `This`. - @return A new list of parser.Argument which types are replaced with their - instantiations. 
- """ - instantiated_args = [] - for arg in args_list: - new_type = instantiate_type(arg.ctype, template_typenames, - instantiations, cpp_typename) - instantiated_args.append( - parser.Argument(name=arg.name, ctype=new_type, - default=arg.default)) - return instantiated_args - - -def instantiate_return_type(return_type, - template_typenames, - instantiations, - cpp_typename, - instantiated_class=None): - """Instantiate the return type.""" - new_type1 = instantiate_type(return_type.type1, - template_typenames, - instantiations, - cpp_typename, - instantiated_class=instantiated_class) - if return_type.type2: - new_type2 = instantiate_type(return_type.type2, - template_typenames, - instantiations, - cpp_typename, - instantiated_class=instantiated_class) - else: - new_type2 = '' - return parser.ReturnType(new_type1, new_type2) - - -def instantiate_name(original_name, instantiations): - """ - Concatenate instantiated types with an @p original name to form a new - instantiated name. - TODO(duy): To avoid conflicts, we should include the instantiation's - namespaces, but I find that too verbose. - """ - instantiated_names = [] - for inst in instantiations: - # Ensure the first character of the type is capitalized - name = inst.instantiated_name() - # Using `capitalize` on the complete name causes other caps to be lower case - instantiated_names.append(name.replace(name[0], name[0].capitalize())) - - return "{}{}".format(original_name, "".join(instantiated_names)) - - -class InstantiatedGlobalFunction(parser.GlobalFunction): - """ - Instantiate global functions. - - E.g. - template - T add(const T& x, const T& y); - """ - def __init__(self, original, instantiations=(), new_name=''): - self.original = original - self.instantiations = instantiations - self.template = '' - self.parent = original.parent - - if not original.template: - self.name = original.name - self.return_type = original.return_type - self.args = original.args - else: - self.name = instantiate_name( - original.name, instantiations) if not new_name else new_name - self.return_type = instantiate_return_type( - original.return_type, - self.original.template.typenames, - self.instantiations, - # Keyword type name `This` should already be replaced in the - # previous class template instantiation round. - cpp_typename='', - ) - instantiated_args = instantiate_args_list( - original.args.list(), - self.original.template.typenames, - self.instantiations, - # Keyword type name `This` should already be replaced in the - # previous class template instantiation round. - cpp_typename='', - ) - self.args = parser.ArgumentList(instantiated_args) - - super().__init__(self.name, - self.return_type, - self.args, - self.template, - parent=self.parent) - - def to_cpp(self): - """Generate the C++ code for wrapping.""" - if self.original.template: - instantiated_names = [ - inst.instantiated_name() for inst in self.instantiations - ] - ret = "{}<{}>".format(self.original.name, - ",".join(instantiated_names)) - else: - ret = self.original.name - return ret - - def __repr__(self): - return "Instantiated {}".format( - super(InstantiatedGlobalFunction, self).__repr__()) - - -class InstantiatedMethod(parser.Method): - """ - Instantiate method with template parameters. - - E.g. 
- class A { - template - void func(X x, Y y); - } - """ - def __init__(self, - original: parser.Method, - instantiations: Iterable[parser.Typename] = ()): - self.original = original - self.instantiations = instantiations - self.template: Any = '' - self.is_const = original.is_const - self.parent = original.parent - - # Check for typenames if templated. - # This way, we can gracefully handle both templated and non-templated methods. - typenames: Sequence = self.original.template.typenames if self.original.template else [] - self.name = instantiate_name(original.name, self.instantiations) - self.return_type = instantiate_return_type( - original.return_type, - typenames, - self.instantiations, - # Keyword type name `This` should already be replaced in the - # previous class template instantiation round. - cpp_typename='', - ) - - instantiated_args = instantiate_args_list( - original.args.list(), - typenames, - self.instantiations, - # Keyword type name `This` should already be replaced in the - # previous class template instantiation round. - cpp_typename='', - ) - self.args = parser.ArgumentList(instantiated_args) - - super().__init__(self.template, - self.name, - self.return_type, - self.args, - self.is_const, - parent=self.parent) - - def to_cpp(self): - """Generate the C++ code for wrapping.""" - if self.original.template: - # to_cpp will handle all the namespacing and templating - instantiation_list = [x.to_cpp() for x in self.instantiations] - # now can simply combine the instantiations, separated by commas - ret = "{}<{}>".format(self.original.name, - ",".join(instantiation_list)) - else: - ret = self.original.name - return ret - - def __repr__(self): - return "Instantiated {}".format( - super(InstantiatedMethod, self).__repr__()) - - -class InstantiatedClass(parser.Class): - """ - Instantiate the class defined in the interface file. - """ - def __init__(self, original: parser.Class, instantiations=(), new_name=''): - """ - Template - Instantiations: [T1, U1] - """ - self.original = original - self.instantiations = instantiations - - self.template = None - self.is_virtual = original.is_virtual - self.parent_class = original.parent_class - self.parent = original.parent - - # If the class is templated, check if the number of provided instantiations - # match the number of templates, else it's only a partial instantiation which is bad. - if original.template: - assert len(original.template.typenames) == len( - instantiations), "Typenames and instantiations mismatch!" - - # Get the instantiated name of the class. E.g. FuncDouble - self.name = instantiate_name( - original.name, instantiations) if not new_name else new_name - - # Check for typenames if templated. - # By passing in typenames, we can gracefully handle both templated and non-templated classes - # This will allow the `This` keyword to be used in both templated and non-templated classes. - typenames = self.original.template.typenames if self.original.template else [] - - # Instantiate the constructors, static methods, properties, respectively. 
- self.ctors = self.instantiate_ctors(typenames) - self.static_methods = self.instantiate_static_methods(typenames) - self.properties = self.instantiate_properties(typenames) - - # Instantiate all operator overloads - self.operators = self.instantiate_operators(typenames) - - # Set enums - self.enums = original.enums - - # Instantiate all instance methods - instantiated_methods = \ - self.instantiate_class_templates_in_methods(typenames) - - # Second instantiation round to instantiate templated methods. - # This is done in case both the class and the method are templated. - self.methods = [] - for method in instantiated_methods: - if not method.template: - self.methods.append(InstantiatedMethod(method, ())) - else: - instantiations = [] - # Get all combinations of template parameters - for instantiations in itertools.product( - *method.template.instantiations): - self.methods.append( - InstantiatedMethod(method, instantiations)) - - super().__init__( - self.template, - self.is_virtual, - self.name, - [self.parent_class], - self.ctors, - self.methods, - self.static_methods, - self.properties, - self.operators, - self.enums, - parent=self.parent, - ) - - def __repr__(self): - return "{virtual}Class {cpp_class} : {parent_class}\n"\ - "{ctors}\n{static_methods}\n{methods}\n{operators}".format( - virtual="virtual " if self.is_virtual else '', - cpp_class=self.to_cpp(), - parent_class=self.parent, - ctors="\n".join([repr(ctor) for ctor in self.ctors]), - static_methods="\n".join([repr(m) - for m in self.static_methods]), - methods="\n".join([repr(m) for m in self.methods]), - operators="\n".join([repr(op) for op in self.operators]) - ) - - def instantiate_ctors(self, typenames): - """ - Instantiate the class constructors. - - Args: - typenames: List of template types to instantiate. - - Return: List of constructors instantiated with provided template args. - """ - instantiated_ctors = [] - - def instantiate(instantiated_ctors, ctor, typenames, instantiations): - instantiated_args = instantiate_args_list( - ctor.args.list(), - typenames, - instantiations, - self.cpp_typename(), - ) - instantiated_ctors.append( - parser.Constructor( - name=self.name, - args=parser.ArgumentList(instantiated_args), - template=self.original.template, - parent=self, - )) - return instantiated_ctors - - for ctor in self.original.ctors: - # Add constructor templates to the typenames and instantiations - if isinstance(ctor.template, parser.template.Template): - typenames.extend(ctor.template.typenames) - - # Get all combinations of template args - for instantiations in itertools.product( - *ctor.template.instantiations): - instantiations = self.instantiations + list(instantiations) - - instantiated_ctors = instantiate( - instantiated_ctors, - ctor, - typenames=typenames, - instantiations=instantiations) - - else: - # If no constructor level templates, just use the class templates - instantiated_ctors = instantiate( - instantiated_ctors, - ctor, - typenames=typenames, - instantiations=self.instantiations) - - return instantiated_ctors - - def instantiate_static_methods(self, typenames): - """ - Instantiate static methods in the class. - - Args: - typenames: List of template types to instantiate. - - Return: List of static methods instantiated with provided template args. 
- """ - instantiated_static_methods = [] - for static_method in self.original.static_methods: - instantiated_args = instantiate_args_list( - static_method.args.list(), typenames, self.instantiations, - self.cpp_typename()) - instantiated_static_methods.append( - parser.StaticMethod( - name=static_method.name, - return_type=instantiate_return_type( - static_method.return_type, - typenames, - self.instantiations, - self.cpp_typename(), - instantiated_class=self), - args=parser.ArgumentList(instantiated_args), - parent=self, - )) - return instantiated_static_methods - - def instantiate_class_templates_in_methods(self, typenames): - """ - This function only instantiates the class-level templates in the methods. - Template methods are instantiated in InstantiatedMethod in the second - round. - - E.g. - ``` - template - class Greeter{ - void sayHello(T& name); - }; - - Args: - typenames: List of template types to instantiate. - - Return: List of methods instantiated with provided template args on the class. - """ - class_instantiated_methods = [] - for method in self.original.methods: - instantiated_args = instantiate_args_list( - method.args.list(), - typenames, - self.instantiations, - self.cpp_typename(), - ) - class_instantiated_methods.append( - parser.Method( - template=method.template, - name=method.name, - return_type=instantiate_return_type( - method.return_type, - typenames, - self.instantiations, - self.cpp_typename(), - ), - args=parser.ArgumentList(instantiated_args), - is_const=method.is_const, - parent=self, - )) - return class_instantiated_methods - - def instantiate_operators(self, typenames): - """ - Instantiate the class-level template in the operator overload. - - Args: - typenames: List of template types to instantiate. - - Return: List of methods instantiated with provided template args on the class. - """ - instantiated_operators = [] - for operator in self.original.operators: - instantiated_args = instantiate_args_list( - operator.args.list(), - typenames, - self.instantiations, - self.cpp_typename(), - ) - instantiated_operators.append( - parser.Operator( - name=operator.name, - operator=operator.operator, - return_type=instantiate_return_type( - operator.return_type, - typenames, - self.instantiations, - self.cpp_typename(), - ), - args=parser.ArgumentList(instantiated_args), - is_const=operator.is_const, - parent=self, - )) - return instantiated_operators - - def instantiate_properties(self, typenames): - """ - Instantiate the class properties. - - Args: - typenames: List of template types to instantiate. - - Return: List of properties instantiated with provided template args. - """ - instantiated_properties = instantiate_args_list( - self.original.properties, - typenames, - self.instantiations, - self.cpp_typename(), - ) - return instantiated_properties - - def cpp_typename(self): - """ - Return a parser.Typename including namespaces and cpp name of this - class. - """ - if self.original.template: - name = "{}<{}>".format( - self.original.name, - ", ".join([inst.to_cpp() for inst in self.instantiations])) - else: - name = self.original.name - namespaces_name = self.namespaces() - namespaces_name.append(name) - return parser.Typename(namespaces_name) - - def to_cpp(self): - """Generate the C++ code for wrapping.""" - return self.cpp_typename().to_cpp() - - -class InstantiatedDeclaration(parser.ForwardDeclaration): - """ - Instantiate typedefs of forward declarations. 
- This is useful when we wish to typedef a templated class - which is not defined in the current project. - - E.g. - class FactorFromAnotherMother; - - typedef FactorFromAnotherMother FactorWeCanUse; - """ - def __init__(self, original, instantiations=(), new_name=''): - super().__init__(original.typename, - original.parent_type, - original.is_virtual, - parent=original.parent) - - self.original = original - self.instantiations = instantiations - self.parent = original.parent - - self.name = instantiate_name( - original.name, instantiations) if not new_name else new_name - - def to_cpp(self): - """Generate the C++ code for wrapping.""" - instantiated_names = [ - inst.qualified_name() for inst in self.instantiations - ] - name = "{}<{}>".format(self.original.name, - ",".join(instantiated_names)) - namespaces_name = self.namespaces() - namespaces_name.append(name) - # Leverage Typename to generate the fully qualified C++ name - return parser.Typename(namespaces_name).to_cpp() - - def __repr__(self): - return "Instantiated {}".format( - super(InstantiatedDeclaration, self).__repr__()) - - -def instantiate_namespace(namespace): - """ - Instantiate the classes and other elements in the `namespace` content and - assign it back to the namespace content attribute. - - @param[in/out] namespace The namespace whose content will be replaced with - the instantiated content. - """ - instantiated_content = [] - typedef_content = [] - - for element in namespace.content: - if isinstance(element, parser.Class): - original_class = element - if not original_class.template: - instantiated_content.append( - InstantiatedClass(original_class, [])) - else: - # This case is for when the templates have enumerated instantiations. - - # Use itertools to get all possible combinations of instantiations - # Works even if one template does not have an instantiation list - for instantiations in itertools.product( - *original_class.template.instantiations): - instantiated_content.append( - InstantiatedClass(original_class, - list(instantiations))) - - elif isinstance(element, parser.GlobalFunction): - original_func = element - if not original_func.template: - instantiated_content.append( - InstantiatedGlobalFunction(original_func, [])) - else: - # Use itertools to get all possible combinations of instantiations - # Works even if one template does not have an instantiation list - for instantiations in itertools.product( - *original_func.template.instantiations): - instantiated_content.append( - InstantiatedGlobalFunction(original_func, - list(instantiations))) - - elif isinstance(element, parser.TypedefTemplateInstantiation): - # This is for the case where `typedef` statements are used - # to specify the template parameters. - typedef_inst = element - top_level = namespace.top_level() - original_element = top_level.find_class_or_function( - typedef_inst.typename) - - # Check if element is a typedef'd class, function or - # forward declaration from another project. 
- if isinstance(original_element, parser.Class): - typedef_content.append( - InstantiatedClass(original_element, - typedef_inst.typename.instantiations, - typedef_inst.new_name)) - elif isinstance(original_element, parser.GlobalFunction): - typedef_content.append( - InstantiatedGlobalFunction( - original_element, typedef_inst.typename.instantiations, - typedef_inst.new_name)) - elif isinstance(original_element, parser.ForwardDeclaration): - typedef_content.append( - InstantiatedDeclaration( - original_element, typedef_inst.typename.instantiations, - typedef_inst.new_name)) - - elif isinstance(element, parser.Namespace): - element = instantiate_namespace(element) - instantiated_content.append(element) - else: - instantiated_content.append(element) - - instantiated_content.extend(typedef_content) - namespace.content = instantiated_content - - return namespace diff --git a/wrap/gtwrap/template_instantiator/__init__.py b/wrap/gtwrap/template_instantiator/__init__.py new file mode 100644 index 000000000..6a30bb3c3 --- /dev/null +++ b/wrap/gtwrap/template_instantiator/__init__.py @@ -0,0 +1,14 @@ +"""Code to help instantiate templated classes, methods and functions.""" + +# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, too-many-arguments, unused-format-string-argument, unused-variable. unused-argument, too-many-branches + +from typing import Iterable, Sequence, Union + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.classes import * +from gtwrap.template_instantiator.constructor import * +from gtwrap.template_instantiator.declaration import * +from gtwrap.template_instantiator.function import * +from gtwrap.template_instantiator.helpers import * +from gtwrap.template_instantiator.method import * +from gtwrap.template_instantiator.namespace import * diff --git a/wrap/gtwrap/template_instantiator/classes.py b/wrap/gtwrap/template_instantiator/classes.py new file mode 100644 index 000000000..af366f80f --- /dev/null +++ b/wrap/gtwrap/template_instantiator/classes.py @@ -0,0 +1,206 @@ +"""Instantiate a class and its members.""" + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.constructor import InstantiatedConstructor +from gtwrap.template_instantiator.helpers import (InstantiationHelper, + instantiate_args_list, + instantiate_name, + instantiate_return_type) +from gtwrap.template_instantiator.method import (InstantiatedMethod, + InstantiatedStaticMethod) + + +class InstantiatedClass(parser.Class): + """ + Instantiate the class defined in the interface file. + """ + def __init__(self, original: parser.Class, instantiations=(), new_name=''): + """ + Template + Instantiations: [T1, U1] + """ + self.original = original + self.instantiations = instantiations + + self.template = None + self.is_virtual = original.is_virtual + self.parent_class = original.parent_class + self.parent = original.parent + + # If the class is templated, check if the number of provided instantiations + # match the number of templates, else it's only a partial instantiation which is bad. + if original.template: + assert len(original.template.typenames) == len( + instantiations), "Typenames and instantiations mismatch!" + + # Get the instantiated name of the class. E.g. FuncDouble + self.name = instantiate_name( + original.name, instantiations) if not new_name else new_name + + # Check for typenames if templated. 
+ # By passing in typenames, we can gracefully handle both templated and non-templated classes + # This will allow the `This` keyword to be used in both templated and non-templated classes. + typenames = self.original.template.typenames if self.original.template else [] + + # Instantiate the constructors, static methods, properties, respectively. + self.ctors = self.instantiate_ctors(typenames) + self.static_methods = self.instantiate_static_methods(typenames) + self.properties = self.instantiate_properties(typenames) + + # Instantiate all operator overloads + self.operators = self.instantiate_operators(typenames) + + # Set enums + self.enums = original.enums + + # Instantiate all instance methods + self.methods = self.instantiate_methods(typenames) + + super().__init__( + self.template, + self.is_virtual, + self.name, + [self.parent_class], + self.ctors, + self.methods, + self.static_methods, + self.properties, + self.operators, + self.enums, + parent=self.parent, + ) + + def __repr__(self): + return "{virtual}Class {cpp_class} : {parent_class}\n"\ + "{ctors}\n{static_methods}\n{methods}\n{operators}".format( + virtual="virtual " if self.is_virtual else '', + cpp_class=self.to_cpp(), + parent_class=self.parent, + ctors="\n".join([repr(ctor) for ctor in self.ctors]), + static_methods="\n".join([repr(m) + for m in self.static_methods]), + methods="\n".join([repr(m) for m in self.methods]), + operators="\n".join([repr(op) for op in self.operators]) + ) + + def instantiate_ctors(self, typenames): + """ + Instantiate the class constructors. + + Args: + typenames: List of template types to instantiate. + + Return: List of constructors instantiated with provided template args. + """ + + helper = InstantiationHelper( + instantiation_type=InstantiatedConstructor) + + instantiated_ctors = helper.multilevel_instantiation( + self.original.ctors, typenames, self) + + return instantiated_ctors + + def instantiate_static_methods(self, typenames): + """ + Instantiate static methods in the class. + + Args: + typenames: List of template types to instantiate. + + Return: List of static methods instantiated with provided template args. + """ + helper = InstantiationHelper( + instantiation_type=InstantiatedStaticMethod) + + instantiated_static_methods = helper.multilevel_instantiation( + self.original.static_methods, typenames, self) + + return instantiated_static_methods + + def instantiate_methods(self, typenames): + """ + Instantiate regular methods in the class. + + Args: + typenames: List of template types to instantiate. + + Return: List of methods instantiated with provided template args. + """ + instantiated_methods = [] + + helper = InstantiationHelper(instantiation_type=InstantiatedMethod) + + instantiated_methods = helper.multilevel_instantiation( + self.original.methods, typenames, self) + + return instantiated_methods + + def instantiate_operators(self, typenames): + """ + Instantiate the class-level template in the operator overload. + + Args: + typenames: List of template types to instantiate. + + Return: List of methods instantiated with provided template args on the class. 
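# [Illustrative sketch, not from the patch itself] Constructors, static methods
# and instance methods above now share one code path and differ only in the
# instantiation type handed to the helper, e.g. for constructors:
#   helper = InstantiationHelper(instantiation_type=InstantiatedConstructor)
#   ctors = helper.multilevel_instantiation(self.original.ctors, typenames, self)
# multilevel_instantiation (defined in helpers.py further below) expands the
# class-level and member-level template parameters in a single pass.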
+ """ + instantiated_operators = [] + for operator in self.original.operators: + instantiated_args = instantiate_args_list( + operator.args.list(), + typenames, + self.instantiations, + self.cpp_typename(), + ) + instantiated_operators.append( + parser.Operator( + name=operator.name, + operator=operator.operator, + return_type=instantiate_return_type( + operator.return_type, + typenames, + self.instantiations, + self.cpp_typename(), + ), + args=parser.ArgumentList(instantiated_args), + is_const=operator.is_const, + parent=self, + )) + return instantiated_operators + + def instantiate_properties(self, typenames): + """ + Instantiate the class properties. + + Args: + typenames: List of template types to instantiate. + + Return: List of properties instantiated with provided template args. + """ + instantiated_properties = instantiate_args_list( + self.original.properties, + typenames, + self.instantiations, + self.cpp_typename(), + ) + return instantiated_properties + + def cpp_typename(self): + """ + Return a parser.Typename including namespaces and cpp name of this + class. + """ + if self.original.template: + name = "{}<{}>".format( + self.original.name, + ", ".join([inst.to_cpp() for inst in self.instantiations])) + else: + name = self.original.name + namespaces_name = self.namespaces() + namespaces_name.append(name) + return parser.Typename(namespaces_name) + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + return self.cpp_typename().to_cpp() diff --git a/wrap/gtwrap/template_instantiator/constructor.py b/wrap/gtwrap/template_instantiator/constructor.py new file mode 100644 index 000000000..1ea7d22d5 --- /dev/null +++ b/wrap/gtwrap/template_instantiator/constructor.py @@ -0,0 +1,64 @@ +"""Class constructor instantiator.""" + +# pylint: disable=unused-argument + +from typing import Iterable, List + +import gtwrap.interface_parser as parser + + +class InstantiatedConstructor(parser.Constructor): + """ + Instantiate constructor with template parameters. + + E.g. 
+ class A { + template + A(X x, Y y); + } + """ + def __init__(self, + original: parser.Constructor, + instantiations: Iterable[parser.Typename] = ()): + self.original = original + self.instantiations = instantiations + self.name = original.name + self.args = original.args + self.template = original.template + self.parent = original.parent + + super().__init__(self.name, + self.args, + self.template, + parent=self.parent) + + @classmethod + def construct(cls, original: parser.Constructor, typenames: List[str], + class_instantiations: List[parser.Typename], + method_instantiations: List[parser.Typename], + instantiated_args: List[parser.Argument], + parent: 'InstantiatedClass'): + """Class method to construct object as required by InstantiationHelper.""" + method = parser.Constructor( + name=parent.name, + args=parser.ArgumentList(instantiated_args), + template=original.template, + parent=parent, + ) + return InstantiatedConstructor(method, + instantiations=method_instantiations) + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + if self.original.template: + # to_cpp will handle all the namespacing and templating + instantiation_list = [x.to_cpp() for x in self.instantiations] + # now can simply combine the instantiations, separated by commas + ret = "{}<{}>".format(self.original.name, + ",".join(instantiation_list)) + else: + ret = self.original.name + return ret + + def __repr__(self): + return "Instantiated {}".format(super().__repr__()) diff --git a/wrap/gtwrap/template_instantiator/declaration.py b/wrap/gtwrap/template_instantiator/declaration.py new file mode 100644 index 000000000..4fa6b75d8 --- /dev/null +++ b/wrap/gtwrap/template_instantiator/declaration.py @@ -0,0 +1,45 @@ +"""Instantiate a forward declaration.""" + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.helpers import instantiate_name + + +class InstantiatedDeclaration(parser.ForwardDeclaration): + """ + Instantiate typedefs of forward declarations. + This is useful when we wish to typedef a templated class + which is not defined in the current project. + + E.g. 
+ class FactorFromAnotherMother; + + typedef FactorFromAnotherMother FactorWeCanUse; + """ + def __init__(self, original, instantiations=(), new_name=''): + super().__init__(original.typename, + original.parent_type, + original.is_virtual, + parent=original.parent) + + self.original = original + self.instantiations = instantiations + self.parent = original.parent + + self.name = instantiate_name( + original.name, instantiations) if not new_name else new_name + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + instantiated_names = [ + inst.qualified_name() for inst in self.instantiations + ] + name = "{}<{}>".format(self.original.name, + ",".join(instantiated_names)) + namespaces_name = self.namespaces() + namespaces_name.append(name) + # Leverage Typename to generate the fully qualified C++ name + return parser.Typename(namespaces_name).to_cpp() + + def __repr__(self): + return "Instantiated {}".format( + super(InstantiatedDeclaration, self).__repr__()) diff --git a/wrap/gtwrap/template_instantiator/function.py b/wrap/gtwrap/template_instantiator/function.py new file mode 100644 index 000000000..3ad5da3f4 --- /dev/null +++ b/wrap/gtwrap/template_instantiator/function.py @@ -0,0 +1,68 @@ +"""Instantiate global function.""" + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.helpers import (instantiate_args_list, + instantiate_name, + instantiate_return_type) + + +class InstantiatedGlobalFunction(parser.GlobalFunction): + """ + Instantiate global functions. + + E.g. + template + T add(const T& x, const T& y); + """ + def __init__(self, original, instantiations=(), new_name=''): + self.original = original + self.instantiations = instantiations + self.template = '' + self.parent = original.parent + + if not original.template: + self.name = original.name + self.return_type = original.return_type + self.args = original.args + else: + self.name = instantiate_name( + original.name, instantiations) if not new_name else new_name + self.return_type = instantiate_return_type( + original.return_type, + self.original.template.typenames, + self.instantiations, + # Keyword type name `This` should already be replaced in the + # previous class template instantiation round. + cpp_typename='', + ) + instantiated_args = instantiate_args_list( + original.args.list(), + self.original.template.typenames, + self.instantiations, + # Keyword type name `This` should already be replaced in the + # previous class template instantiation round. 
+ cpp_typename='', + ) + self.args = parser.ArgumentList(instantiated_args) + + super().__init__(self.name, + self.return_type, + self.args, + self.template, + parent=self.parent) + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + if self.original.template: + instantiated_names = [ + inst.instantiated_name() for inst in self.instantiations + ] + ret = "{}<{}>".format(self.original.name, + ",".join(instantiated_names)) + else: + ret = self.original.name + return ret + + def __repr__(self): + return "Instantiated {}".format( + super(InstantiatedGlobalFunction, self).__repr__()) diff --git a/wrap/gtwrap/template_instantiator/helpers.py b/wrap/gtwrap/template_instantiator/helpers.py new file mode 100644 index 000000000..b55515dba --- /dev/null +++ b/wrap/gtwrap/template_instantiator/helpers.py @@ -0,0 +1,293 @@ +"""Various helpers for instantiation.""" + +import itertools +from copy import deepcopy +from typing import List, Sequence, Union + +import gtwrap.interface_parser as parser + +ClassMembers = Union[parser.Constructor, parser.Method, parser.StaticMethod, + parser.GlobalFunction, parser.Operator, parser.Variable, + parser.Enum] +InstantiatedMembers = Union['InstantiatedConstructor', 'InstantiatedMethod', + 'InstantiatedStaticMethod', + 'InstantiatedGlobalFunction'] + + +def is_scoped_template(template_typenames: Sequence[str], + str_arg_typename: str): + """ + Check if the template given by `str_arg_typename` is a scoped template e.g. T::Value, + and if so, return what template from `template_typenames` and + the corresponding index matches the scoped template correctly. + """ + for idx, template in enumerate(template_typenames): + if "::" in str_arg_typename and \ + template in str_arg_typename.split("::"): + return template, idx + return False, -1 + + +def instantiate_type( + ctype: parser.Type, + template_typenames: Sequence[str], + instantiations: Sequence[parser.Typename], + cpp_typename: parser.Typename, + instantiated_class: 'InstantiatedClass' = None) -> parser.Type: + """ + Instantiate template typename for `ctype`. + + Args: + ctype: The original argument type. + template_typenames: List of strings representing the templates. + instantiations: List of the instantiations of the templates in `template_typenames`. + cpp_typename: Full-namespace cpp class name of this instantiation + to replace for arguments of type named `This`. + instiated_class: The instantiated class which, if provided, + will be used for instantiating `This`. + + Returns: + If `ctype`'s name is in the `template_typenames`, return the + corresponding type to replace in `instantiations`. + If ctype name is `This`, return the new typename `cpp_typename`. + Otherwise, return the original ctype. + """ + # make a deep copy so that there is no overwriting of original template params + ctype = deepcopy(ctype) + + # Check if the return type has template parameters + if ctype.typename.instantiations: + for idx, instantiation in enumerate(ctype.typename.instantiations): + if instantiation.name in template_typenames: + template_idx = template_typenames.index(instantiation.name) + ctype.typename.instantiations[ + idx] = instantiations[ # type: ignore + template_idx] + + return ctype + + str_arg_typename = str(ctype.typename) + + # Check if template is a scoped template e.g. T::Value where T is the template + scoped_template, scoped_idx = is_scoped_template(template_typenames, + str_arg_typename) + + # Instantiate templates which have enumerated instantiations in the template. + # E.g. `template`. 
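# [Illustrative sketch, not from the patch itself] is_scoped_template (defined
# above) only fires for qualified uses of a template parameter:
#   is_scoped_template(['T'], 'T::Value')  # -> ('T', 0)
#   is_scoped_template(['T'], 'double')    # -> (False, -1)
# The ('T', 0) case is what the scoped-template branch just below consumes.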
+ + # Instantiate scoped templates, e.g. T::Value. + if scoped_template: + # Create a copy of the instantiation so we can modify it. + instantiation = deepcopy(instantiations[scoped_idx]) + # Replace the part of the template with the instantiation + instantiation.name = str_arg_typename.replace(scoped_template, + instantiation.name) + return parser.Type( + typename=instantiation, + is_const=ctype.is_const, + is_shared_ptr=ctype.is_shared_ptr, + is_ptr=ctype.is_ptr, + is_ref=ctype.is_ref, + is_basic=ctype.is_basic, + ) + # Check for exact template match. + elif str_arg_typename in template_typenames: + idx = template_typenames.index(str_arg_typename) + return parser.Type( + typename=instantiations[idx], + is_const=ctype.is_const, + is_shared_ptr=ctype.is_shared_ptr, + is_ptr=ctype.is_ptr, + is_ref=ctype.is_ref, + is_basic=ctype.is_basic, + ) + + # If a method has the keyword `This`, we replace it with the (instantiated) class. + elif str_arg_typename == 'This': + # Check if the class is template instantiated + # so we can replace it with the instantiated version. + if instantiated_class: + name = instantiated_class.original.name + namespaces_name = instantiated_class.namespaces() + namespaces_name.append(name) + cpp_typename = parser.Typename( + namespaces_name, + instantiations=instantiated_class.instantiations) + + return parser.Type( + typename=cpp_typename, + is_const=ctype.is_const, + is_shared_ptr=ctype.is_shared_ptr, + is_ptr=ctype.is_ptr, + is_ref=ctype.is_ref, + is_basic=ctype.is_basic, + ) + + # Case when 'This' is present in the type namespace, e.g `This::Subclass`. + elif 'This' in str_arg_typename: + # Simply get the index of `This` in the namespace and replace it with the instantiated name. + namespace_idx = ctype.typename.namespaces.index('This') + ctype.typename.namespaces[namespace_idx] = cpp_typename.name + return ctype + + else: + return ctype + + +def instantiate_args_list( + args_list: Sequence[parser.Argument], + template_typenames: Sequence[parser.template.Typename], + instantiations: Sequence, cpp_typename: parser.Typename): + """ + Instantiate template typenames in an argument list. + Type with name `This` will be replaced by @p `cpp_typename`. + + @param[in] args_list A list of `parser.Argument` to instantiate. + @param[in] template_typenames List of template typenames to instantiate, + e.g. ['T', 'U', 'V']. + @param[in] instantiations List of specific types to instantiate, each + associated with each template typename. Each type is a parser.Typename, + including its name and full namespaces. + @param[in] cpp_typename Full-namespace cpp class name of this instantiation + to replace for arguments of type named `This`. + @return A new list of parser.Argument which types are replaced with their + instantiations. 
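# [Illustrative sketch, not from the patch itself] With template_typenames=['T']
# and instantiations=[<Typename for double>], an argument declared as `T x`
# comes back as `double x`, a plain `int y` is returned unchanged, and an
# argument of type `This` is rewritten to the fully qualified name of the
# instantiated class (see instantiate_type above).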
+ """ + instantiated_args = [] + for arg in args_list: + new_type = instantiate_type(arg.ctype, template_typenames, + instantiations, cpp_typename) + instantiated_args.append( + parser.Argument(name=arg.name, ctype=new_type, + default=arg.default)) + return instantiated_args + + +def instantiate_return_type( + return_type: parser.ReturnType, + template_typenames: Sequence[parser.template.Typename], + instantiations: Sequence[parser.Typename], + cpp_typename: parser.Typename, + instantiated_class: 'InstantiatedClass' = None): + """Instantiate the return type.""" + new_type1 = instantiate_type(return_type.type1, + template_typenames, + instantiations, + cpp_typename, + instantiated_class=instantiated_class) + if return_type.type2: + new_type2 = instantiate_type(return_type.type2, + template_typenames, + instantiations, + cpp_typename, + instantiated_class=instantiated_class) + else: + new_type2 = '' + return parser.ReturnType(new_type1, new_type2) + + +def instantiate_name(original_name: str, + instantiations: Sequence[parser.Typename]): + """ + Concatenate instantiated types with `original_name` to form a new + instantiated name. + + NOTE: To avoid conflicts, we should include the instantiation's + namespaces, but that is too verbose. + """ + instantiated_names = [] + for inst in instantiations: + # Ensure the first character of the type is capitalized + name = inst.instantiated_name() + # Using `capitalize` on the complete name causes other caps to be lower case + instantiated_names.append(name.replace(name[0], name[0].capitalize())) + + return "{}{}".format(original_name, "".join(instantiated_names)) + + +class InstantiationHelper: + """ + Helper class for instantiation templates. + Requires that `instantiation_type` defines a class method called + `construct` to generate the appropriate object type. + + Signature for `construct` should be + ``` + construct(method, + typenames, + class_instantiations, + method_instantiations, + instantiated_args, + parent=parent) + ``` + """ + def __init__(self, instantiation_type: InstantiatedMembers): + self.instantiation_type = instantiation_type + + def instantiate(self, instantiated_methods: List[InstantiatedMembers], + method: ClassMembers, typenames: Sequence[str], + class_instantiations: Sequence[parser.Typename], + method_instantiations: Sequence[parser.Typename], + parent: 'InstantiatedClass'): + """ + Instantiate both the class and method level templates. + """ + instantiations = class_instantiations + method_instantiations + + instantiated_args = instantiate_args_list(method.args.list(), + typenames, instantiations, + parent.cpp_typename()) + + instantiated_methods.append( + self.instantiation_type.construct(method, + typenames, + class_instantiations, + method_instantiations, + instantiated_args, + parent=parent)) + + return instantiated_methods + + def multilevel_instantiation(self, methods_list: Sequence[ClassMembers], + typenames: Sequence[str], + parent: 'InstantiatedClass'): + """ + Helper to instantiate methods at both the class and method level. + + Args: + methods_list: The list of methods in the class to instantiated. + typenames: List of class level template parameters, e.g. ['T']. + parent: The instantiated class to which `methods_list` belongs. + """ + instantiated_methods = [] + + for method in methods_list: + # We creare a copy since we will modify the typenames list. 
+ method_typenames = deepcopy(typenames) + + if isinstance(method.template, parser.template.Template): + method_typenames.extend(method.template.typenames) + + # Get all combinations of template args + for instantiations in itertools.product( + *method.template.instantiations): + + instantiated_methods = self.instantiate( + instantiated_methods, + method, + typenames=method_typenames, + class_instantiations=parent.instantiations, + method_instantiations=list(instantiations), + parent=parent) + + else: + # If no constructor level templates, just use the class templates + instantiated_methods = self.instantiate( + instantiated_methods, + method, + typenames=method_typenames, + class_instantiations=parent.instantiations, + method_instantiations=[], + parent=parent) + + return instantiated_methods diff --git a/wrap/gtwrap/template_instantiator/method.py b/wrap/gtwrap/template_instantiator/method.py new file mode 100644 index 000000000..cd0a09c90 --- /dev/null +++ b/wrap/gtwrap/template_instantiator/method.py @@ -0,0 +1,124 @@ +"""Class method and static method instantiators.""" + +from typing import Iterable + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.helpers import (instantiate_name, + instantiate_return_type) + + +class InstantiatedMethod(parser.Method): + """ + Instantiate method with template parameters. + + E.g. + class A { + template + void func(X x, Y y); + } + """ + def __init__(self, + original: parser.Method, + instantiations: Iterable[parser.Typename] = ()): + self.original = original + self.instantiations = instantiations + self.template = original.template + self.is_const = original.is_const + self.parent = original.parent + + self.name = instantiate_name(original.name, self.instantiations) + self.return_type = original.return_type + self.args = original.args + + super().__init__(self.template, + self.name, + self.return_type, + self.args, + self.is_const, + parent=self.parent) + + @classmethod + def construct(cls, original, typenames, class_instantiations, + method_instantiations, instantiated_args, parent): + """Class method to construct object as required by InstantiationHelper.""" + method = parser.Method( + template=original.template, + name=original.name, + return_type=instantiate_return_type( + original.return_type, typenames, + class_instantiations + method_instantiations, + parent.cpp_typename()), + args=parser.ArgumentList(instantiated_args), + is_const=original.is_const, + parent=parent, + ) + return InstantiatedMethod(method, instantiations=method_instantiations) + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + if self.original.template: + # to_cpp will handle all the namespacing and templating + instantiation_list = [x.to_cpp() for x in self.instantiations] + # now can simply combine the instantiations, separated by commas + ret = "{}<{}>".format(self.original.name, + ",".join(instantiation_list)) + else: + ret = self.original.name + return ret + + def __repr__(self): + return "Instantiated {}".format(super().__repr__()) + + +class InstantiatedStaticMethod(parser.StaticMethod): + """ + Instantiate static method with template parameters. 
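# [Illustrative sketch, not from the patch itself; names inferred from the
# expected-output tests below, original method spelling assumed]
# instantiate_name appends capitalized instantiation names for the
# wrapped-language side, while to_cpp keeps C++ template syntax. E.g. for `Fun`
# instantiated with double and a static method `templatedStaticMethod`
# instantiated with int:
#   wrapped name: FunDouble.templatedStaticMethodInt
#   C++ call:     templatedStaticMethod<int>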
+ """ + def __init__(self, + original: parser.StaticMethod, + instantiations: Iterable[parser.Typename] = ()): + self.original = original + self.instantiations = instantiations + + self.name = instantiate_name(original.name, self.instantiations) + self.return_type = original.return_type + self.args = original.args + self.template = original.template + self.parent = original.parent + + super().__init__(self.name, self.return_type, self.args, self.template, + self.parent) + + @classmethod + def construct(cls, original, typenames, class_instantiations, + method_instantiations, instantiated_args, parent): + """Class method to construct object as required by InstantiationHelper.""" + method = parser.StaticMethod( + name=original.name, + return_type=instantiate_return_type(original.return_type, + typenames, + class_instantiations + + method_instantiations, + parent.cpp_typename(), + instantiated_class=parent), + args=parser.ArgumentList(instantiated_args), + template=original.template, + parent=parent, + ) + return InstantiatedStaticMethod(method, + instantiations=method_instantiations) + + def to_cpp(self): + """Generate the C++ code for wrapping.""" + if self.original.template: + # to_cpp will handle all the namespacing and templating + instantiation_list = [x.to_cpp() for x in self.instantiations] + # now can simply combine the instantiations, separated by commas + ret = "{}<{}>".format(self.original.name, + ",".join(instantiation_list)) + else: + ret = self.original.name + return ret + + def __repr__(self): + return "Instantiated {}".format(super().__repr__()) diff --git a/wrap/gtwrap/template_instantiator/namespace.py b/wrap/gtwrap/template_instantiator/namespace.py new file mode 100644 index 000000000..32ba0b95d --- /dev/null +++ b/wrap/gtwrap/template_instantiator/namespace.py @@ -0,0 +1,88 @@ +"""Instantiate a namespace.""" + +import itertools + +import gtwrap.interface_parser as parser +from gtwrap.template_instantiator.classes import InstantiatedClass +from gtwrap.template_instantiator.declaration import InstantiatedDeclaration +from gtwrap.template_instantiator.function import InstantiatedGlobalFunction + + +def instantiate_namespace(namespace): + """ + Instantiate the classes and other elements in the `namespace` content and + assign it back to the namespace content attribute. + + @param[in/out] namespace The namespace whose content will be replaced with + the instantiated content. + """ + instantiated_content = [] + typedef_content = [] + + for element in namespace.content: + if isinstance(element, parser.Class): + original_class = element + if not original_class.template: + instantiated_content.append( + InstantiatedClass(original_class, [])) + else: + # This case is for when the templates have enumerated instantiations. 
+ + # Use itertools to get all possible combinations of instantiations + # Works even if one template does not have an instantiation list + for instantiations in itertools.product( + *original_class.template.instantiations): + instantiated_content.append( + InstantiatedClass(original_class, + list(instantiations))) + + elif isinstance(element, parser.GlobalFunction): + original_func = element + if not original_func.template: + instantiated_content.append( + InstantiatedGlobalFunction(original_func, [])) + else: + # Use itertools to get all possible combinations of instantiations + # Works even if one template does not have an instantiation list + for instantiations in itertools.product( + *original_func.template.instantiations): + instantiated_content.append( + InstantiatedGlobalFunction(original_func, + list(instantiations))) + + elif isinstance(element, parser.TypedefTemplateInstantiation): + # This is for the case where `typedef` statements are used + # to specify the template parameters. + typedef_inst = element + top_level = namespace.top_level() + original_element = top_level.find_class_or_function( + typedef_inst.typename) + + # Check if element is a typedef'd class, function or + # forward declaration from another project. + if isinstance(original_element, parser.Class): + typedef_content.append( + InstantiatedClass(original_element, + typedef_inst.typename.instantiations, + typedef_inst.new_name)) + elif isinstance(original_element, parser.GlobalFunction): + typedef_content.append( + InstantiatedGlobalFunction( + original_element, typedef_inst.typename.instantiations, + typedef_inst.new_name)) + elif isinstance(original_element, parser.ForwardDeclaration): + typedef_content.append( + InstantiatedDeclaration( + original_element, typedef_inst.typename.instantiations, + typedef_inst.new_name)) + + elif isinstance(element, parser.Namespace): + element = instantiate_namespace(element) + instantiated_content.append(element) + else: + instantiated_content.append(element) + + instantiated_content.extend(typedef_content) + namespace.content = instantiated_content + + return namespace diff --git a/wrap/requirements.txt b/wrap/requirements.txt index a7181b271..0aac9302e 100644 --- a/wrap/requirements.txt +++ b/wrap/requirements.txt @@ -1,2 +1,2 @@ -pyparsing -pytest +pyparsing==2.4.7 +pytest>=6.2.4 diff --git a/wrap/tests/expected/matlab/FunDouble.m b/wrap/tests/expected/matlab/FunDouble.m index ca0efcacf..78609c7f6 100644 --- a/wrap/tests/expected/matlab/FunDouble.m +++ b/wrap/tests/expected/matlab/FunDouble.m @@ -7,6 +7,7 @@ % %-------Static Methods------- %staticMethodWithThis() : returns Fun +%templatedStaticMethodInt(int m) : returns double % %-------Serialization Interface------- %string_serialize() : returns string @@ -69,5 +70,16 @@ classdef FunDouble < handle error('Arguments do not match any overload of function FunDouble.staticMethodWithThis'); end + function varargout = TemplatedStaticMethodInt(varargin) + % TEMPLATEDSTATICMETHODINT usage: templatedStaticMethodInt(int m) : returns double + % Doxygen can be found at https://gtsam.org/doxygen/ + if length(varargin) == 1 && isa(varargin{1},'numeric') + varargout{1} = class_wrapper(10, varargin{:}); + return + end + + error('Arguments do not match any overload of function FunDouble.templatedStaticMethodInt'); + end + end end diff --git a/wrap/tests/expected/matlab/MultipleTemplatesIntDouble.m b/wrap/tests/expected/matlab/MultipleTemplatesIntDouble.m index 1a00572e0..863d30ee8 100644 --- 
a/wrap/tests/expected/matlab/MultipleTemplatesIntDouble.m +++ b/wrap/tests/expected/matlab/MultipleTemplatesIntDouble.m @@ -9,7 +9,7 @@ classdef MultipleTemplatesIntDouble < handle function obj = MultipleTemplatesIntDouble(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(49, my_ptr); + class_wrapper(50, my_ptr); else error('Arguments do not match any overload of MultipleTemplatesIntDouble constructor'); end @@ -17,7 +17,7 @@ classdef MultipleTemplatesIntDouble < handle end function delete(obj) - class_wrapper(50, obj.ptr_MultipleTemplatesIntDouble); + class_wrapper(51, obj.ptr_MultipleTemplatesIntDouble); end function display(obj), obj.print(''); end diff --git a/wrap/tests/expected/matlab/MultipleTemplatesIntFloat.m b/wrap/tests/expected/matlab/MultipleTemplatesIntFloat.m index 6239b1bd1..b7f1fdac5 100644 --- a/wrap/tests/expected/matlab/MultipleTemplatesIntFloat.m +++ b/wrap/tests/expected/matlab/MultipleTemplatesIntFloat.m @@ -9,7 +9,7 @@ classdef MultipleTemplatesIntFloat < handle function obj = MultipleTemplatesIntFloat(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(51, my_ptr); + class_wrapper(52, my_ptr); else error('Arguments do not match any overload of MultipleTemplatesIntFloat constructor'); end @@ -17,7 +17,7 @@ classdef MultipleTemplatesIntFloat < handle end function delete(obj) - class_wrapper(52, obj.ptr_MultipleTemplatesIntFloat); + class_wrapper(53, obj.ptr_MultipleTemplatesIntFloat); end function display(obj), obj.print(''); end diff --git a/wrap/tests/expected/matlab/MyFactorPosePoint2.m b/wrap/tests/expected/matlab/MyFactorPosePoint2.m index e4cd92196..7634ae2cb 100644 --- a/wrap/tests/expected/matlab/MyFactorPosePoint2.m +++ b/wrap/tests/expected/matlab/MyFactorPosePoint2.m @@ -15,9 +15,9 @@ classdef MyFactorPosePoint2 < handle function obj = MyFactorPosePoint2(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(62, my_ptr); + class_wrapper(63, my_ptr); elseif nargin == 4 && isa(varargin{1},'numeric') && isa(varargin{2},'numeric') && isa(varargin{3},'double') && isa(varargin{4},'gtsam.noiseModel.Base') - my_ptr = class_wrapper(63, varargin{1}, varargin{2}, varargin{3}, varargin{4}); + my_ptr = class_wrapper(64, varargin{1}, varargin{2}, varargin{3}, varargin{4}); else error('Arguments do not match any overload of MyFactorPosePoint2 constructor'); end @@ -25,7 +25,7 @@ classdef MyFactorPosePoint2 < handle end function delete(obj) - class_wrapper(64, obj.ptr_MyFactorPosePoint2); + class_wrapper(65, obj.ptr_MyFactorPosePoint2); end function display(obj), obj.print(''); end @@ -36,7 +36,7 @@ classdef MyFactorPosePoint2 < handle % PRINT usage: print(string s, KeyFormatter keyFormatter) : returns void % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 2 && isa(varargin{1},'char') && isa(varargin{2},'gtsam.KeyFormatter') - class_wrapper(65, this, varargin{:}); + class_wrapper(66, this, varargin{:}); return end error('Arguments do not match any overload of function MyFactorPosePoint2.print'); diff --git a/wrap/tests/expected/matlab/MyVector12.m b/wrap/tests/expected/matlab/MyVector12.m index 00a8f1965..291d0d71b 100644 --- a/wrap/tests/expected/matlab/MyVector12.m +++ b/wrap/tests/expected/matlab/MyVector12.m @@ -12,9 +12,9 @@ classdef MyVector12 < handle function obj = 
MyVector12(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(46, my_ptr); + class_wrapper(47, my_ptr); elseif nargin == 0 - my_ptr = class_wrapper(47); + my_ptr = class_wrapper(48); else error('Arguments do not match any overload of MyVector12 constructor'); end @@ -22,7 +22,7 @@ classdef MyVector12 < handle end function delete(obj) - class_wrapper(48, obj.ptr_MyVector12); + class_wrapper(49, obj.ptr_MyVector12); end function display(obj), obj.print(''); end diff --git a/wrap/tests/expected/matlab/MyVector3.m b/wrap/tests/expected/matlab/MyVector3.m index 5d4a80cd8..3051dc2e2 100644 --- a/wrap/tests/expected/matlab/MyVector3.m +++ b/wrap/tests/expected/matlab/MyVector3.m @@ -12,9 +12,9 @@ classdef MyVector3 < handle function obj = MyVector3(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(43, my_ptr); + class_wrapper(44, my_ptr); elseif nargin == 0 - my_ptr = class_wrapper(44); + my_ptr = class_wrapper(45); else error('Arguments do not match any overload of MyVector3 constructor'); end @@ -22,7 +22,7 @@ classdef MyVector3 < handle end function delete(obj) - class_wrapper(45, obj.ptr_MyVector3); + class_wrapper(46, obj.ptr_MyVector3); end function display(obj), obj.print(''); end diff --git a/wrap/tests/expected/matlab/PrimitiveRefDouble.m b/wrap/tests/expected/matlab/PrimitiveRefDouble.m index 14f04a825..dd0a6d2da 100644 --- a/wrap/tests/expected/matlab/PrimitiveRefDouble.m +++ b/wrap/tests/expected/matlab/PrimitiveRefDouble.m @@ -19,9 +19,9 @@ classdef PrimitiveRefDouble < handle function obj = PrimitiveRefDouble(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(39, my_ptr); + class_wrapper(40, my_ptr); elseif nargin == 0 - my_ptr = class_wrapper(40); + my_ptr = class_wrapper(41); else error('Arguments do not match any overload of PrimitiveRefDouble constructor'); end @@ -29,7 +29,7 @@ classdef PrimitiveRefDouble < handle end function delete(obj) - class_wrapper(41, obj.ptr_PrimitiveRefDouble); + class_wrapper(42, obj.ptr_PrimitiveRefDouble); end function display(obj), obj.print(''); end @@ -43,7 +43,7 @@ classdef PrimitiveRefDouble < handle % BRUTAL usage: Brutal(double t) : returns PrimitiveRefdouble % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') - varargout{1} = class_wrapper(42, varargin{:}); + varargout{1} = class_wrapper(43, varargin{:}); return end diff --git a/wrap/tests/expected/matlab/Test.m b/wrap/tests/expected/matlab/Test.m index c39882a93..8569120c5 100644 --- a/wrap/tests/expected/matlab/Test.m +++ b/wrap/tests/expected/matlab/Test.m @@ -40,11 +40,11 @@ classdef Test < handle function obj = Test(varargin) if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682) my_ptr = varargin{2}; - class_wrapper(10, my_ptr); + class_wrapper(11, my_ptr); elseif nargin == 0 - my_ptr = class_wrapper(11); + my_ptr = class_wrapper(12); elseif nargin == 2 && isa(varargin{1},'double') && isa(varargin{2},'double') - my_ptr = class_wrapper(12, varargin{1}, varargin{2}); + my_ptr = class_wrapper(13, varargin{1}, varargin{2}); else error('Arguments do not match any overload of Test constructor'); end @@ -52,7 +52,7 @@ classdef Test < handle end function delete(obj) - class_wrapper(13, obj.ptr_Test); + class_wrapper(14, obj.ptr_Test); end 
function display(obj), obj.print(''); end @@ -63,7 +63,7 @@ classdef Test < handle % ARG_EIGENCONSTREF usage: arg_EigenConstRef(Matrix value) : returns void % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') - class_wrapper(14, this, varargin{:}); + class_wrapper(15, this, varargin{:}); return end error('Arguments do not match any overload of function Test.arg_EigenConstRef'); @@ -73,7 +73,7 @@ classdef Test < handle % CREATE_MIXEDPTRS usage: create_MixedPtrs() : returns pair< Test, Test > % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 0 - [ varargout{1} varargout{2} ] = class_wrapper(15, this, varargin{:}); + [ varargout{1} varargout{2} ] = class_wrapper(16, this, varargin{:}); return end error('Arguments do not match any overload of function Test.create_MixedPtrs'); @@ -83,7 +83,7 @@ classdef Test < handle % CREATE_PTRS usage: create_ptrs() : returns pair< Test, Test > % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 0 - [ varargout{1} varargout{2} ] = class_wrapper(16, this, varargin{:}); + [ varargout{1} varargout{2} ] = class_wrapper(17, this, varargin{:}); return end error('Arguments do not match any overload of function Test.create_ptrs'); @@ -93,7 +93,7 @@ classdef Test < handle % GET_CONTAINER usage: get_container() : returns std.vectorTest % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 0 - varargout{1} = class_wrapper(17, this, varargin{:}); + varargout{1} = class_wrapper(18, this, varargin{:}); return end error('Arguments do not match any overload of function Test.get_container'); @@ -103,7 +103,7 @@ classdef Test < handle % LAMBDA usage: lambda() : returns void % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 0 - class_wrapper(18, this, varargin{:}); + class_wrapper(19, this, varargin{:}); return end error('Arguments do not match any overload of function Test.lambda'); @@ -113,7 +113,7 @@ classdef Test < handle % PRINT usage: print() : returns void % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 0 - class_wrapper(19, this, varargin{:}); + class_wrapper(20, this, varargin{:}); return end error('Arguments do not match any overload of function Test.print'); @@ -123,7 +123,7 @@ classdef Test < handle % RETURN_POINT2PTR usage: return_Point2Ptr(bool value) : returns Point2 % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'logical') - varargout{1} = class_wrapper(20, this, varargin{:}); + varargout{1} = class_wrapper(21, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_Point2Ptr'); @@ -133,7 +133,7 @@ classdef Test < handle % RETURN_TEST usage: return_Test(Test value) : returns Test % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'Test') - varargout{1} = class_wrapper(21, this, varargin{:}); + varargout{1} = class_wrapper(22, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_Test'); @@ -143,7 +143,7 @@ classdef Test < handle % RETURN_TESTPTR usage: return_TestPtr(Test value) : returns Test % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'Test') - varargout{1} = class_wrapper(22, this, varargin{:}); + varargout{1} = class_wrapper(23, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_TestPtr'); @@ -153,7 
+153,7 @@ classdef Test < handle % RETURN_BOOL usage: return_bool(bool value) : returns bool % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'logical') - varargout{1} = class_wrapper(23, this, varargin{:}); + varargout{1} = class_wrapper(24, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_bool'); @@ -163,7 +163,7 @@ classdef Test < handle % RETURN_DOUBLE usage: return_double(double value) : returns double % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') - varargout{1} = class_wrapper(24, this, varargin{:}); + varargout{1} = class_wrapper(25, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_double'); @@ -173,7 +173,7 @@ classdef Test < handle % RETURN_FIELD usage: return_field(Test t) : returns bool % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'Test') - varargout{1} = class_wrapper(25, this, varargin{:}); + varargout{1} = class_wrapper(26, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_field'); @@ -183,7 +183,7 @@ classdef Test < handle % RETURN_INT usage: return_int(int value) : returns int % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'numeric') - varargout{1} = class_wrapper(26, this, varargin{:}); + varargout{1} = class_wrapper(27, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_int'); @@ -193,7 +193,7 @@ classdef Test < handle % RETURN_MATRIX1 usage: return_matrix1(Matrix value) : returns Matrix % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') - varargout{1} = class_wrapper(27, this, varargin{:}); + varargout{1} = class_wrapper(28, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_matrix1'); @@ -203,7 +203,7 @@ classdef Test < handle % RETURN_MATRIX2 usage: return_matrix2(Matrix value) : returns Matrix % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') - varargout{1} = class_wrapper(28, this, varargin{:}); + varargout{1} = class_wrapper(29, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_matrix2'); @@ -213,13 +213,13 @@ classdef Test < handle % RETURN_PAIR usage: return_pair(Vector v, Matrix A) : returns pair< Vector, Matrix > % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 2 && isa(varargin{1},'double') && size(varargin{1},2)==1 && isa(varargin{2},'double') - [ varargout{1} varargout{2} ] = class_wrapper(29, this, varargin{:}); + [ varargout{1} varargout{2} ] = class_wrapper(30, this, varargin{:}); return end % RETURN_PAIR usage: return_pair(Vector v) : returns pair< Vector, Matrix > % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1 - [ varargout{1} varargout{2} ] = class_wrapper(30, this, varargin{:}); + [ varargout{1} varargout{2} ] = class_wrapper(31, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_pair'); @@ -229,7 +229,7 @@ classdef Test < handle % RETURN_PTRS usage: return_ptrs(Test p1, Test p2) : returns pair< Test, Test > % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 2 && 
isa(varargin{1},'Test') && isa(varargin{2},'Test') - [ varargout{1} varargout{2} ] = class_wrapper(31, this, varargin{:}); + [ varargout{1} varargout{2} ] = class_wrapper(32, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_ptrs'); @@ -239,7 +239,7 @@ classdef Test < handle % RETURN_SIZE_T usage: return_size_t(size_t value) : returns size_t % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'numeric') - varargout{1} = class_wrapper(32, this, varargin{:}); + varargout{1} = class_wrapper(33, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_size_t'); @@ -249,7 +249,7 @@ classdef Test < handle % RETURN_STRING usage: return_string(string value) : returns string % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'char') - varargout{1} = class_wrapper(33, this, varargin{:}); + varargout{1} = class_wrapper(34, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_string'); @@ -259,7 +259,7 @@ classdef Test < handle % RETURN_VECTOR1 usage: return_vector1(Vector value) : returns Vector % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1 - varargout{1} = class_wrapper(34, this, varargin{:}); + varargout{1} = class_wrapper(35, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_vector1'); @@ -269,19 +269,13 @@ classdef Test < handle % RETURN_VECTOR2 usage: return_vector2(Vector value) : returns Vector % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1 - varargout{1} = class_wrapper(35, this, varargin{:}); + varargout{1} = class_wrapper(36, this, varargin{:}); return end error('Arguments do not match any overload of function Test.return_vector2'); end function varargout = set_container(this, varargin) - % SET_CONTAINER usage: set_container(vector container) : returns void - % Doxygen can be found at https://gtsam.org/doxygen/ - if length(varargin) == 1 && isa(varargin{1},'std.vectorTest') - class_wrapper(36, this, varargin{:}); - return - end % SET_CONTAINER usage: set_container(vector container) : returns void % Doxygen can be found at https://gtsam.org/doxygen/ if length(varargin) == 1 && isa(varargin{1},'std.vectorTest') @@ -294,6 +288,12 @@ classdef Test < handle class_wrapper(38, this, varargin{:}); return end + % SET_CONTAINER usage: set_container(vector container) : returns void + % Doxygen can be found at https://gtsam.org/doxygen/ + if length(varargin) == 1 && isa(varargin{1},'std.vectorTest') + class_wrapper(39, this, varargin{:}); + return + end error('Arguments do not match any overload of function Test.set_container'); end diff --git a/wrap/tests/expected/matlab/class_wrapper.cpp b/wrap/tests/expected/matlab/class_wrapper.cpp index 48d6b2dce..df6cb3307 100644 --- a/wrap/tests/expected/matlab/class_wrapper.cpp +++ b/wrap/tests/expected/matlab/class_wrapper.cpp @@ -246,7 +246,14 @@ void FunDouble_staticMethodWithThis_9(int nargout, mxArray *out[], int nargin, c out[0] = wrap_shared_ptr(boost::make_shared>(Fun::staticMethodWithThis()),"Fundouble", false); } -void Test_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void FunDouble_templatedStaticMethodInt_10(int nargout, mxArray *out[], int nargin, const mxArray 
*in[]) +{ + checkArguments("FunDouble.templatedStaticMethodInt",nargout,nargin,1); + int m = unwrap< int >(in[0]); + out[0] = wrap< double >(Fun::templatedStaticMethodInt(m)); +} + +void Test_collectorInsertAndMakeBase_11(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -255,7 +262,7 @@ void Test_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin, collector_Test.insert(self); } -void Test_constructor_11(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -266,7 +273,7 @@ void Test_constructor_11(int nargout, mxArray *out[], int nargin, const mxArray *reinterpret_cast (mxGetData(out[0])) = self; } -void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_constructor_13(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -279,7 +286,7 @@ void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray *reinterpret_cast (mxGetData(out[0])) = self; } -void Test_deconstructor_13(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_deconstructor_14(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr Shared; checkArguments("delete_Test",nargout,nargin,1); @@ -292,7 +299,7 @@ void Test_deconstructor_13(int nargout, mxArray *out[], int nargin, const mxArra } } -void Test_arg_EigenConstRef_14(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_arg_EigenConstRef_15(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("arg_EigenConstRef",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -300,7 +307,7 @@ void Test_arg_EigenConstRef_14(int nargout, mxArray *out[], int nargin, const mx obj->arg_EigenConstRef(value); } -void Test_create_MixedPtrs_15(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_create_MixedPtrs_16(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("create_MixedPtrs",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -309,7 +316,7 @@ void Test_create_MixedPtrs_15(int nargout, mxArray *out[], int nargin, const mxA out[1] = wrap_shared_ptr(pairResult.second,"Test", false); } -void Test_create_ptrs_16(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_create_ptrs_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("create_ptrs",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -318,28 +325,28 @@ void Test_create_ptrs_16(int nargout, mxArray *out[], int nargin, const mxArray out[1] = wrap_shared_ptr(pairResult.second,"Test", false); } -void Test_get_container_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_get_container_18(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("get_container",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); out[0] = wrap_shared_ptr(boost::make_shared>(obj->get_container()),"std.vectorTest", false); } -void Test_lambda_18(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_lambda_19(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("lambda",nargout,nargin-1,0); auto 
obj = unwrap_shared_ptr(in[0], "ptr_Test"); obj->lambda(); } -void Test_print_19(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_print_20(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("print",nargout,nargin-1,0); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); obj->print(); } -void Test_return_Point2Ptr_20(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_Point2Ptr_21(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_Point2Ptr",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -350,7 +357,7 @@ void Test_return_Point2Ptr_20(int nargout, mxArray *out[], int nargin, const mxA } } -void Test_return_Test_21(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_Test_22(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_Test",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -358,7 +365,7 @@ void Test_return_Test_21(int nargout, mxArray *out[], int nargin, const mxArray out[0] = wrap_shared_ptr(boost::make_shared(obj->return_Test(value)),"Test", false); } -void Test_return_TestPtr_22(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_TestPtr_23(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_TestPtr",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -366,7 +373,7 @@ void Test_return_TestPtr_22(int nargout, mxArray *out[], int nargin, const mxArr out[0] = wrap_shared_ptr(obj->return_TestPtr(value),"Test", false); } -void Test_return_bool_23(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_bool_24(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_bool",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -374,7 +381,7 @@ void Test_return_bool_23(int nargout, mxArray *out[], int nargin, const mxArray out[0] = wrap< bool >(obj->return_bool(value)); } -void Test_return_double_24(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_double_25(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_double",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -382,7 +389,7 @@ void Test_return_double_24(int nargout, mxArray *out[], int nargin, const mxArra out[0] = wrap< double >(obj->return_double(value)); } -void Test_return_field_25(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_field_26(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_field",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -390,7 +397,7 @@ void Test_return_field_25(int nargout, mxArray *out[], int nargin, const mxArray out[0] = wrap< bool >(obj->return_field(t)); } -void Test_return_int_26(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_int_27(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_int",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -398,7 +405,7 @@ void Test_return_int_26(int nargout, mxArray *out[], int nargin, const mxArray * out[0] = wrap< int >(obj->return_int(value)); } -void Test_return_matrix1_27(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_matrix1_28(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { 
checkArguments("return_matrix1",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -406,7 +413,7 @@ void Test_return_matrix1_27(int nargout, mxArray *out[], int nargin, const mxArr out[0] = wrap< Matrix >(obj->return_matrix1(value)); } -void Test_return_matrix2_28(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_matrix2_29(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_matrix2",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -414,7 +421,7 @@ void Test_return_matrix2_28(int nargout, mxArray *out[], int nargin, const mxArr out[0] = wrap< Matrix >(obj->return_matrix2(value)); } -void Test_return_pair_29(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_pair",nargout,nargin-1,2); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -425,7 +432,7 @@ void Test_return_pair_29(int nargout, mxArray *out[], int nargin, const mxArray out[1] = wrap< Matrix >(pairResult.second); } -void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_pair_31(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_pair",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -435,7 +442,7 @@ void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray out[1] = wrap< Matrix >(pairResult.second); } -void Test_return_ptrs_31(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_ptrs_32(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_ptrs",nargout,nargin-1,2); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -446,7 +453,7 @@ void Test_return_ptrs_31(int nargout, mxArray *out[], int nargin, const mxArray out[1] = wrap_shared_ptr(pairResult.second,"Test", false); } -void Test_return_size_t_32(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_size_t_33(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_size_t",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -454,7 +461,7 @@ void Test_return_size_t_32(int nargout, mxArray *out[], int nargin, const mxArra out[0] = wrap< size_t >(obj->return_size_t(value)); } -void Test_return_string_33(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_string_34(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_string",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -462,7 +469,7 @@ void Test_return_string_33(int nargout, mxArray *out[], int nargin, const mxArra out[0] = wrap< string >(obj->return_string(value)); } -void Test_return_vector1_34(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_vector1_35(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_vector1",nargout,nargin-1,1); auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); @@ -470,7 +477,7 @@ void Test_return_vector1_34(int nargout, mxArray *out[], int nargin, const mxArr out[0] = wrap< Vector >(obj->return_vector1(value)); } -void Test_return_vector2_35(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_return_vector2_36(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("return_vector2",nargout,nargin-1,1); auto obj = 
unwrap_shared_ptr(in[0], "ptr_Test"); @@ -478,14 +485,6 @@ void Test_return_vector2_35(int nargout, mxArray *out[], int nargin, const mxArr out[0] = wrap< Vector >(obj->return_vector2(value)); } -void Test_set_container_36(int nargout, mxArray *out[], int nargin, const mxArray *in[]) -{ - checkArguments("set_container",nargout,nargin-1,1); - auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); - boost::shared_ptr> container = unwrap_shared_ptr< std::vector >(in[1], "ptr_stdvectorTest"); - obj->set_container(*container); -} - void Test_set_container_37(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("set_container",nargout,nargin-1,1); @@ -502,7 +501,15 @@ void Test_set_container_38(int nargout, mxArray *out[], int nargin, const mxArra obj->set_container(*container); } -void PrimitiveRefDouble_collectorInsertAndMakeBase_39(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void Test_set_container_39(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +{ + checkArguments("set_container",nargout,nargin-1,1); + auto obj = unwrap_shared_ptr(in[0], "ptr_Test"); + boost::shared_ptr> container = unwrap_shared_ptr< std::vector >(in[1], "ptr_stdvectorTest"); + obj->set_container(*container); +} + +void PrimitiveRefDouble_collectorInsertAndMakeBase_40(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -511,7 +518,7 @@ void PrimitiveRefDouble_collectorInsertAndMakeBase_39(int nargout, mxArray *out[ collector_PrimitiveRefDouble.insert(self); } -void PrimitiveRefDouble_constructor_40(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void PrimitiveRefDouble_constructor_41(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -522,7 +529,7 @@ void PrimitiveRefDouble_constructor_40(int nargout, mxArray *out[], int nargin, *reinterpret_cast (mxGetData(out[0])) = self; } -void PrimitiveRefDouble_deconstructor_41(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void PrimitiveRefDouble_deconstructor_42(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_PrimitiveRefDouble",nargout,nargin,1); @@ -535,14 +542,14 @@ void PrimitiveRefDouble_deconstructor_41(int nargout, mxArray *out[], int nargin } } -void PrimitiveRefDouble_Brutal_42(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void PrimitiveRefDouble_Brutal_43(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("PrimitiveRefDouble.Brutal",nargout,nargin,1); double t = unwrap< double >(in[0]); out[0] = wrap_shared_ptr(boost::make_shared>(PrimitiveRef::Brutal(t)),"PrimitiveRefdouble", false); } -void MyVector3_collectorInsertAndMakeBase_43(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector3_collectorInsertAndMakeBase_44(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -551,7 +558,7 @@ void MyVector3_collectorInsertAndMakeBase_43(int nargout, mxArray *out[], int na collector_MyVector3.insert(self); } -void MyVector3_constructor_44(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector3_constructor_45(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -562,7 +569,7 @@ void MyVector3_constructor_44(int nargout, 
mxArray *out[], int nargin, const mxA *reinterpret_cast (mxGetData(out[0])) = self; } -void MyVector3_deconstructor_45(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector3_deconstructor_46(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_MyVector3",nargout,nargin,1); @@ -575,7 +582,7 @@ void MyVector3_deconstructor_45(int nargout, mxArray *out[], int nargin, const m } } -void MyVector12_collectorInsertAndMakeBase_46(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector12_collectorInsertAndMakeBase_47(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -584,7 +591,7 @@ void MyVector12_collectorInsertAndMakeBase_46(int nargout, mxArray *out[], int n collector_MyVector12.insert(self); } -void MyVector12_constructor_47(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector12_constructor_48(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -595,7 +602,7 @@ void MyVector12_constructor_47(int nargout, mxArray *out[], int nargin, const mx *reinterpret_cast (mxGetData(out[0])) = self; } -void MyVector12_deconstructor_48(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyVector12_deconstructor_49(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_MyVector12",nargout,nargin,1); @@ -608,7 +615,7 @@ void MyVector12_deconstructor_48(int nargout, mxArray *out[], int nargin, const } } -void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_50(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -617,7 +624,7 @@ void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(int nargout, mxArr collector_MultipleTemplatesIntDouble.insert(self); } -void MultipleTemplatesIntDouble_deconstructor_50(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MultipleTemplatesIntDouble_deconstructor_51(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_MultipleTemplatesIntDouble",nargout,nargin,1); @@ -630,7 +637,7 @@ void MultipleTemplatesIntDouble_deconstructor_50(int nargout, mxArray *out[], in } } -void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_52(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -639,7 +646,7 @@ void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(int nargout, mxArra collector_MultipleTemplatesIntFloat.insert(self); } -void MultipleTemplatesIntFloat_deconstructor_52(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MultipleTemplatesIntFloat_deconstructor_53(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_MultipleTemplatesIntFloat",nargout,nargin,1); @@ -652,7 +659,7 @@ void MultipleTemplatesIntFloat_deconstructor_52(int nargout, mxArray *out[], int } } -void ForwardKinematics_collectorInsertAndMakeBase_53(int 
nargout, mxArray *out[], int nargin, const mxArray *in[]) +void ForwardKinematics_collectorInsertAndMakeBase_54(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -661,7 +668,7 @@ void ForwardKinematics_collectorInsertAndMakeBase_53(int nargout, mxArray *out[] collector_ForwardKinematics.insert(self); } -void ForwardKinematics_constructor_54(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void ForwardKinematics_constructor_55(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -677,7 +684,7 @@ void ForwardKinematics_constructor_54(int nargout, mxArray *out[], int nargin, c *reinterpret_cast (mxGetData(out[0])) = self; } -void ForwardKinematics_deconstructor_55(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void ForwardKinematics_deconstructor_56(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr Shared; checkArguments("delete_ForwardKinematics",nargout,nargin,1); @@ -690,7 +697,7 @@ void ForwardKinematics_deconstructor_55(int nargout, mxArray *out[], int nargin, } } -void TemplatedConstructor_collectorInsertAndMakeBase_56(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_collectorInsertAndMakeBase_57(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -699,7 +706,7 @@ void TemplatedConstructor_collectorInsertAndMakeBase_56(int nargout, mxArray *ou collector_TemplatedConstructor.insert(self); } -void TemplatedConstructor_constructor_57(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -710,7 +717,7 @@ void TemplatedConstructor_constructor_57(int nargout, mxArray *out[], int nargin *reinterpret_cast (mxGetData(out[0])) = self; } -void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -722,7 +729,7 @@ void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin *reinterpret_cast (mxGetData(out[0])) = self; } -void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -734,7 +741,7 @@ void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin *reinterpret_cast (mxGetData(out[0])) = self; } -void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_constructor_61(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr Shared; @@ -746,7 +753,7 @@ void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin *reinterpret_cast (mxGetData(out[0])) = self; } -void TemplatedConstructor_deconstructor_61(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void TemplatedConstructor_deconstructor_62(int nargout, mxArray *out[], int nargin, 
const mxArray *in[]) { typedef boost::shared_ptr Shared; checkArguments("delete_TemplatedConstructor",nargout,nargin,1); @@ -759,7 +766,7 @@ void TemplatedConstructor_deconstructor_61(int nargout, mxArray *out[], int narg } } -void MyFactorPosePoint2_collectorInsertAndMakeBase_62(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyFactorPosePoint2_collectorInsertAndMakeBase_63(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -768,7 +775,7 @@ void MyFactorPosePoint2_collectorInsertAndMakeBase_62(int nargout, mxArray *out[ collector_MyFactorPosePoint2.insert(self); } -void MyFactorPosePoint2_constructor_63(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyFactorPosePoint2_constructor_64(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { mexAtExit(&_deleteAllObjects); typedef boost::shared_ptr> Shared; @@ -783,7 +790,7 @@ void MyFactorPosePoint2_constructor_63(int nargout, mxArray *out[], int nargin, *reinterpret_cast (mxGetData(out[0])) = self; } -void MyFactorPosePoint2_deconstructor_64(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyFactorPosePoint2_deconstructor_65(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { typedef boost::shared_ptr> Shared; checkArguments("delete_MyFactorPosePoint2",nargout,nargin,1); @@ -796,7 +803,7 @@ void MyFactorPosePoint2_deconstructor_64(int nargout, mxArray *out[], int nargin } } -void MyFactorPosePoint2_print_65(int nargout, mxArray *out[], int nargin, const mxArray *in[]) +void MyFactorPosePoint2_print_66(int nargout, mxArray *out[], int nargin, const mxArray *in[]) { checkArguments("print",nargout,nargin-1,2); auto obj = unwrap_shared_ptr>(in[0], "ptr_MyFactorPosePoint2"); @@ -848,85 +855,85 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[]) FunDouble_staticMethodWithThis_9(nargout, out, nargin-1, in+1); break; case 10: - Test_collectorInsertAndMakeBase_10(nargout, out, nargin-1, in+1); + FunDouble_templatedStaticMethodInt_10(nargout, out, nargin-1, in+1); break; case 11: - Test_constructor_11(nargout, out, nargin-1, in+1); + Test_collectorInsertAndMakeBase_11(nargout, out, nargin-1, in+1); break; case 12: Test_constructor_12(nargout, out, nargin-1, in+1); break; case 13: - Test_deconstructor_13(nargout, out, nargin-1, in+1); + Test_constructor_13(nargout, out, nargin-1, in+1); break; case 14: - Test_arg_EigenConstRef_14(nargout, out, nargin-1, in+1); + Test_deconstructor_14(nargout, out, nargin-1, in+1); break; case 15: - Test_create_MixedPtrs_15(nargout, out, nargin-1, in+1); + Test_arg_EigenConstRef_15(nargout, out, nargin-1, in+1); break; case 16: - Test_create_ptrs_16(nargout, out, nargin-1, in+1); + Test_create_MixedPtrs_16(nargout, out, nargin-1, in+1); break; case 17: - Test_get_container_17(nargout, out, nargin-1, in+1); + Test_create_ptrs_17(nargout, out, nargin-1, in+1); break; case 18: - Test_lambda_18(nargout, out, nargin-1, in+1); + Test_get_container_18(nargout, out, nargin-1, in+1); break; case 19: - Test_print_19(nargout, out, nargin-1, in+1); + Test_lambda_19(nargout, out, nargin-1, in+1); break; case 20: - Test_return_Point2Ptr_20(nargout, out, nargin-1, in+1); + Test_print_20(nargout, out, nargin-1, in+1); break; case 21: - Test_return_Test_21(nargout, out, nargin-1, in+1); + Test_return_Point2Ptr_21(nargout, out, nargin-1, in+1); break; case 22: - Test_return_TestPtr_22(nargout, out, nargin-1, in+1); + Test_return_Test_22(nargout, 
out, nargin-1, in+1); break; case 23: - Test_return_bool_23(nargout, out, nargin-1, in+1); + Test_return_TestPtr_23(nargout, out, nargin-1, in+1); break; case 24: - Test_return_double_24(nargout, out, nargin-1, in+1); + Test_return_bool_24(nargout, out, nargin-1, in+1); break; case 25: - Test_return_field_25(nargout, out, nargin-1, in+1); + Test_return_double_25(nargout, out, nargin-1, in+1); break; case 26: - Test_return_int_26(nargout, out, nargin-1, in+1); + Test_return_field_26(nargout, out, nargin-1, in+1); break; case 27: - Test_return_matrix1_27(nargout, out, nargin-1, in+1); + Test_return_int_27(nargout, out, nargin-1, in+1); break; case 28: - Test_return_matrix2_28(nargout, out, nargin-1, in+1); + Test_return_matrix1_28(nargout, out, nargin-1, in+1); break; case 29: - Test_return_pair_29(nargout, out, nargin-1, in+1); + Test_return_matrix2_29(nargout, out, nargin-1, in+1); break; case 30: Test_return_pair_30(nargout, out, nargin-1, in+1); break; case 31: - Test_return_ptrs_31(nargout, out, nargin-1, in+1); + Test_return_pair_31(nargout, out, nargin-1, in+1); break; case 32: - Test_return_size_t_32(nargout, out, nargin-1, in+1); + Test_return_ptrs_32(nargout, out, nargin-1, in+1); break; case 33: - Test_return_string_33(nargout, out, nargin-1, in+1); + Test_return_size_t_33(nargout, out, nargin-1, in+1); break; case 34: - Test_return_vector1_34(nargout, out, nargin-1, in+1); + Test_return_string_34(nargout, out, nargin-1, in+1); break; case 35: - Test_return_vector2_35(nargout, out, nargin-1, in+1); + Test_return_vector1_35(nargout, out, nargin-1, in+1); break; case 36: - Test_set_container_36(nargout, out, nargin-1, in+1); + Test_return_vector2_36(nargout, out, nargin-1, in+1); break; case 37: Test_set_container_37(nargout, out, nargin-1, in+1); @@ -935,61 +942,61 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[]) Test_set_container_38(nargout, out, nargin-1, in+1); break; case 39: - PrimitiveRefDouble_collectorInsertAndMakeBase_39(nargout, out, nargin-1, in+1); + Test_set_container_39(nargout, out, nargin-1, in+1); break; case 40: - PrimitiveRefDouble_constructor_40(nargout, out, nargin-1, in+1); + PrimitiveRefDouble_collectorInsertAndMakeBase_40(nargout, out, nargin-1, in+1); break; case 41: - PrimitiveRefDouble_deconstructor_41(nargout, out, nargin-1, in+1); + PrimitiveRefDouble_constructor_41(nargout, out, nargin-1, in+1); break; case 42: - PrimitiveRefDouble_Brutal_42(nargout, out, nargin-1, in+1); + PrimitiveRefDouble_deconstructor_42(nargout, out, nargin-1, in+1); break; case 43: - MyVector3_collectorInsertAndMakeBase_43(nargout, out, nargin-1, in+1); + PrimitiveRefDouble_Brutal_43(nargout, out, nargin-1, in+1); break; case 44: - MyVector3_constructor_44(nargout, out, nargin-1, in+1); + MyVector3_collectorInsertAndMakeBase_44(nargout, out, nargin-1, in+1); break; case 45: - MyVector3_deconstructor_45(nargout, out, nargin-1, in+1); + MyVector3_constructor_45(nargout, out, nargin-1, in+1); break; case 46: - MyVector12_collectorInsertAndMakeBase_46(nargout, out, nargin-1, in+1); + MyVector3_deconstructor_46(nargout, out, nargin-1, in+1); break; case 47: - MyVector12_constructor_47(nargout, out, nargin-1, in+1); + MyVector12_collectorInsertAndMakeBase_47(nargout, out, nargin-1, in+1); break; case 48: - MyVector12_deconstructor_48(nargout, out, nargin-1, in+1); + MyVector12_constructor_48(nargout, out, nargin-1, in+1); break; case 49: - MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(nargout, out, nargin-1, in+1); + 
MyVector12_deconstructor_49(nargout, out, nargin-1, in+1); break; case 50: - MultipleTemplatesIntDouble_deconstructor_50(nargout, out, nargin-1, in+1); + MultipleTemplatesIntDouble_collectorInsertAndMakeBase_50(nargout, out, nargin-1, in+1); break; case 51: - MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(nargout, out, nargin-1, in+1); + MultipleTemplatesIntDouble_deconstructor_51(nargout, out, nargin-1, in+1); break; case 52: - MultipleTemplatesIntFloat_deconstructor_52(nargout, out, nargin-1, in+1); + MultipleTemplatesIntFloat_collectorInsertAndMakeBase_52(nargout, out, nargin-1, in+1); break; case 53: - ForwardKinematics_collectorInsertAndMakeBase_53(nargout, out, nargin-1, in+1); + MultipleTemplatesIntFloat_deconstructor_53(nargout, out, nargin-1, in+1); break; case 54: - ForwardKinematics_constructor_54(nargout, out, nargin-1, in+1); + ForwardKinematics_collectorInsertAndMakeBase_54(nargout, out, nargin-1, in+1); break; case 55: - ForwardKinematics_deconstructor_55(nargout, out, nargin-1, in+1); + ForwardKinematics_constructor_55(nargout, out, nargin-1, in+1); break; case 56: - TemplatedConstructor_collectorInsertAndMakeBase_56(nargout, out, nargin-1, in+1); + ForwardKinematics_deconstructor_56(nargout, out, nargin-1, in+1); break; case 57: - TemplatedConstructor_constructor_57(nargout, out, nargin-1, in+1); + TemplatedConstructor_collectorInsertAndMakeBase_57(nargout, out, nargin-1, in+1); break; case 58: TemplatedConstructor_constructor_58(nargout, out, nargin-1, in+1); @@ -1001,19 +1008,22 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[]) TemplatedConstructor_constructor_60(nargout, out, nargin-1, in+1); break; case 61: - TemplatedConstructor_deconstructor_61(nargout, out, nargin-1, in+1); + TemplatedConstructor_constructor_61(nargout, out, nargin-1, in+1); break; case 62: - MyFactorPosePoint2_collectorInsertAndMakeBase_62(nargout, out, nargin-1, in+1); + TemplatedConstructor_deconstructor_62(nargout, out, nargin-1, in+1); break; case 63: - MyFactorPosePoint2_constructor_63(nargout, out, nargin-1, in+1); + MyFactorPosePoint2_collectorInsertAndMakeBase_63(nargout, out, nargin-1, in+1); break; case 64: - MyFactorPosePoint2_deconstructor_64(nargout, out, nargin-1, in+1); + MyFactorPosePoint2_constructor_64(nargout, out, nargin-1, in+1); break; case 65: - MyFactorPosePoint2_print_65(nargout, out, nargin-1, in+1); + MyFactorPosePoint2_deconstructor_65(nargout, out, nargin-1, in+1); + break; + case 66: + MyFactorPosePoint2_print_66(nargout, out, nargin-1, in+1); break; } } catch(const std::exception& e) { diff --git a/wrap/tests/expected/python/class_pybind.cpp b/wrap/tests/expected/python/class_pybind.cpp index f36c0a84c..a54d9135b 100644 --- a/wrap/tests/expected/python/class_pybind.cpp +++ b/wrap/tests/expected/python/class_pybind.cpp @@ -31,7 +31,8 @@ PYBIND11_MODULE(class_py, m_) { py::class_, std::shared_ptr>>(m_, "FunDouble") .def("templatedMethodString",[](Fun* self, double d, string t){return self->templatedMethod(d, t);}, py::arg("d"), py::arg("t")) .def("multiTemplatedMethodStringSize_t",[](Fun* self, double d, string t, size_t u){return self->multiTemplatedMethod(d, t, u);}, py::arg("d"), py::arg("t"), py::arg("u")) - .def_static("staticMethodWithThis",[](){return Fun::staticMethodWithThis();}); + .def_static("staticMethodWithThis",[](){return Fun::staticMethodWithThis();}) + .def_static("templatedStaticMethodInt",[](const int& m){return Fun::templatedStaticMethod(m);}, py::arg("m")); py::class_>(m_, "Test") .def(py::init<>()) diff 
--git a/wrap/tests/fixtures/class.i b/wrap/tests/fixtures/class.i index 9923ffce7..40a565506 100644 --- a/wrap/tests/fixtures/class.i +++ b/wrap/tests/fixtures/class.i @@ -10,6 +10,9 @@ class Fun { static This staticMethodWithThis(); + template + static double templatedStaticMethod(const T& m); + template This templatedMethod(double d, T t); diff --git a/wrap/tests/test_interface_parser.py b/wrap/tests/test_interface_parser.py index 25e4178a6..49165328c 100644 --- a/wrap/tests/test_interface_parser.py +++ b/wrap/tests/test_interface_parser.py @@ -18,11 +18,13 @@ import unittest sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from gtwrap.interface_parser import ( - ArgumentList, Class, Constructor, Enum, Enumerator, ForwardDeclaration, - GlobalFunction, Include, Method, Module, Namespace, Operator, ReturnType, - StaticMethod, TemplatedType, Type, TypedefTemplateInstantiation, Typename, - Variable) +from gtwrap.interface_parser import (ArgumentList, Class, Constructor, Enum, + Enumerator, ForwardDeclaration, + GlobalFunction, Include, Method, Module, + Namespace, Operator, ReturnType, + StaticMethod, TemplatedType, Type, + TypedefTemplateInstantiation, Typename, + Variable) class TestInterfaceParser(unittest.TestCase): @@ -266,6 +268,11 @@ class TestInterfaceParser(unittest.TestCase): self.assertEqual("char", return_type.type1.typename.name) self.assertEqual("int", return_type.type2.typename.name) + return_type = ReturnType.rule.parseString("pair")[0] + self.assertEqual("Test", return_type.type1.typename.name) + self.assertEqual("Test", return_type.type2.typename.name) + self.assertTrue(return_type.type2.is_shared_ptr) + def test_method(self): """Test for a class method.""" ret = Method.rule.parseString("int f();")[0] @@ -283,6 +290,13 @@ class TestInterfaceParser(unittest.TestCase): self.assertEqual("f", ret.name) self.assertEqual(3, len(ret.args)) + ret = Method.rule.parseString( + "pair create_MixedPtrs();")[0] + self.assertEqual("create_MixedPtrs", ret.name) + self.assertEqual(0, len(ret.args)) + self.assertEqual("First", ret.return_type.type1.typename.name) + self.assertEqual("Second", ret.return_type.type2.typename.name) + def test_static_method(self): """Test for static methods.""" ret = StaticMethod.rule.parseString("static int f();")[0] diff --git a/wrap/tests/test_matlab_wrapper.py b/wrap/tests/test_matlab_wrapper.py index 89e53ab21..34940d62e 100644 --- a/wrap/tests/test_matlab_wrapper.py +++ b/wrap/tests/test_matlab_wrapper.py @@ -42,17 +42,16 @@ class TestWrap(unittest.TestCase): # Create the `actual/matlab` directory os.makedirs(self.MATLAB_ACTUAL_DIR, exist_ok=True) - def compare_and_diff(self, file): + def compare_and_diff(self, file, actual): """ Compute the comparison between the expected and actual file, and assert if diff is zero. 
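The fixture change above adds a templated static method (`static double templatedStaticMethod(const T& m);`) to `Fun` in `class.i`; its `int` instantiation is exposed as `FunDouble_templatedStaticMethodInt_10` in the MATLAB wrapper and as the `templatedStaticMethodInt` static in the pybind module. A hypothetical usage sketch, assuming the generated `class_py` test module has been built and is importable (building it is not part of this patch):

```python
# Hypothetical: exercise the newly wrapped static method from Python. The
# module name `class_py` and class name `FunDouble` come from the generated
# pybind fixture above; building the extension module is assumed, not shown.
import class_py

# The int instantiation takes one argument `m` and returns a double,
# mirroring the MATLAB-side FunDouble wrapper above.
result = class_py.FunDouble.templatedStaticMethodInt(3)
print(result)
```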
""" - output = osp.join(self.MATLAB_ACTUAL_DIR, file) expected = osp.join(self.MATLAB_TEST_DIR, file) - success = filecmp.cmp(output, expected) + success = filecmp.cmp(actual, expected) + if not success: - print("Differ in file: {}".format(file)) - os.system("diff {} {}".format(output, expected)) + os.system("diff {} {}".format(actual, expected)) self.assertTrue(success, "Mismatch for file {0}".format(file)) def test_geometry(self): @@ -77,7 +76,8 @@ class TestWrap(unittest.TestCase): self.assertTrue(osp.isdir(osp.join(self.MATLAB_ACTUAL_DIR, '+gtsam'))) for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_functions(self): """Test interface file with function info.""" @@ -99,7 +99,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_class(self): """Test interface file with only class info.""" @@ -121,7 +122,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_templates(self): """Test interface file with template info.""" @@ -138,7 +140,8 @@ class TestWrap(unittest.TestCase): files = ['template_wrapper.cpp'] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_inheritance(self): """Test interface file with class inheritance definitions.""" @@ -157,7 +160,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_namespaces(self): """ @@ -181,7 +185,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_special_cases(self): """ @@ -203,7 +208,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) def test_multiple_files(self): """ @@ -228,7 +234,8 @@ class TestWrap(unittest.TestCase): ] for file in files: - self.compare_and_diff(file) + actual = osp.join(self.MATLAB_ACTUAL_DIR, file) + self.compare_and_diff(file, actual) if __name__ == '__main__': diff --git a/wrap/tests/test_template_instantiator.py b/wrap/tests/test_template_instantiator.py new file mode 100644 index 000000000..4faf01aa9 --- /dev/null +++ b/wrap/tests/test_template_instantiator.py @@ -0,0 +1,606 @@ +""" +GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Tests for template_instantiator. 
+ +Author: Varun Agrawal +""" + +# pylint: disable=import-error,wrong-import-position + +import os +import sys +import unittest + +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from gtwrap.interface_parser import (Argument, ArgumentList, Class, + Constructor, ForwardDeclaration, + GlobalFunction, Include, Method, + Namespace, ReturnType, StaticMethod, + Typename) +from gtwrap.template_instantiator import ( + InstantiatedClass, InstantiatedConstructor, InstantiatedDeclaration, + InstantiatedGlobalFunction, InstantiatedMethod, InstantiatedStaticMethod, + InstantiationHelper, instantiate_args_list, instantiate_name, + instantiate_namespace, instantiate_return_type, instantiate_type, + is_scoped_template) + + +class TestInstantiationHelper(unittest.TestCase): + """Tests for the InstantiationHelper class.""" + def test_constructor(self): + """Test constructor.""" + helper = InstantiationHelper(InstantiatedClass) + self.assertEqual(helper.instantiation_type, InstantiatedClass) + helper = InstantiationHelper(InstantiatedConstructor) + self.assertEqual(helper.instantiation_type, InstantiatedConstructor) + helper = InstantiationHelper(InstantiatedDeclaration) + self.assertEqual(helper.instantiation_type, InstantiatedDeclaration) + helper = InstantiationHelper(InstantiatedGlobalFunction) + self.assertEqual(helper.instantiation_type, InstantiatedGlobalFunction) + helper = InstantiationHelper(InstantiatedMethod) + self.assertEqual(helper.instantiation_type, InstantiatedMethod) + helper = InstantiationHelper(InstantiatedStaticMethod) + self.assertEqual(helper.instantiation_type, InstantiatedStaticMethod) + + def test_instantiate(self): + """Test instantiate method.""" + method = Method.rule.parseString(""" + template + double method(const T x, const U& param); + """)[0] + cls = Class.rule.parseString(""" + template + class Foo {}; + """)[0] + typenames = ['T', 'U'] + class_instantiations = [Typename.rule.parseString("string")[0]] + method_instantiations = [Typename.rule.parseString("double")[0]] + + parent = InstantiatedClass(cls, class_instantiations) + + helper = InstantiationHelper(InstantiatedMethod) + instantiated_methods = helper.instantiate([], method, typenames, + class_instantiations, + method_instantiations, + parent) + + self.assertEqual(len(instantiated_methods), 1) + args_list = instantiated_methods[0].args.list() + self.assertEqual(args_list[0].ctype.get_typename(), 'string') + self.assertEqual(args_list[1].ctype.get_typename(), 'double') + + def test_multilevel_instantiation(self): + """ + Test method for multilevel instantiation + i.e. instantiation at both the class and method level. 
+ """ + cls = Class.rule.parseString(""" + template + class Foo { + template + double method1(const T x, const U& param); + + template + V method2(const T x); + }; + """)[0] + + typenames = ['T'] + class_instantiations = [Typename.rule.parseString("string")[0]] + parent = InstantiatedClass(cls, class_instantiations) + + helper = InstantiationHelper(InstantiatedMethod) + + instantiated_methods = helper.multilevel_instantiation( + cls.methods, typenames, parent) + self.assertEqual(len(instantiated_methods), 2) + self.assertEqual( + instantiated_methods[0].args.list()[0].ctype.get_typename(), + 'string') + self.assertEqual( + instantiated_methods[0].args.list()[1].ctype.get_typename(), + 'double') + self.assertEqual( + instantiated_methods[1].args.list()[0].ctype.get_typename(), + 'string') + self.assertEqual( + instantiated_methods[1].return_type.type1.get_typename(), 'int') + + +class TestInstantiatedGlobalFunction(unittest.TestCase): + """Tests for the InstantiatedGlobalFunction class.""" + def setUp(self): + original = GlobalFunction.rule.parseString(""" + template + R function(const T& x); + """)[0] + instantiations = [ + Typename.rule.parseString("int")[0], + Typename.rule.parseString("double")[0] + ] + self.func = InstantiatedGlobalFunction(original, instantiations) + + def test_constructor(self): + """Test constructor.""" + self.assertIsInstance(self.func, InstantiatedGlobalFunction) + self.assertIsInstance(self.func.original, GlobalFunction) + self.assertEqual(self.func.name, "functionIntDouble") + self.assertEqual(len(self.func.args.list()), 1) + self.assertEqual(self.func.args.list()[0].ctype.get_typename(), "int") + self.assertEqual(self.func.return_type.type1.get_typename(), "double") + + def test_to_cpp(self): + """Test to_cpp method.""" + actual = self.func.to_cpp() + self.assertEqual(actual, "function") + + +class TestInstantiatedConstructor(unittest.TestCase): + """Tests for the InstantiatedConstructor class.""" + def setUp(self): + constructor = Constructor.rule.parseString(""" + template + Class(C x, const U& param); + """)[0] + instantiations = [ + Typename.rule.parseString("double")[0], + Typename.rule.parseString("string")[0] + ] + self.constructor = InstantiatedConstructor(constructor, instantiations) + + def test_constructor(self): + """Test constructor.""" + self.assertIsInstance(self.constructor, InstantiatedConstructor) + self.assertIsInstance(self.constructor.original, Constructor) + + def test_construct(self): + """Test the construct classmethod.""" + constructor = Constructor.rule.parseString(""" + template + Class(C x, const U& param); + """)[0] + c = Class.rule.parseString(""" + template + class Class {}; + """)[0] + class_instantiations = [Typename.rule.parseString("double")[0]] + method_instantiations = [Typename.rule.parseString("string")[0]] + typenames = ['C', 'U'] + parent = InstantiatedClass(c, class_instantiations) + instantiated_args = instantiate_args_list( + constructor.args.list(), + typenames, class_instantiations + method_instantiations, + parent.cpp_typename()) + + instantiated_constructor = InstantiatedConstructor.construct( + constructor, typenames, class_instantiations, + method_instantiations, instantiated_args, parent) + self.assertEqual(instantiated_constructor.name, "ClassDouble") + self.assertEqual( + instantiated_constructor.args.list()[0].ctype.get_typename(), + "double") + self.assertEqual( + instantiated_constructor.args.list()[1].ctype.get_typename(), + "string") + + def test_to_cpp(self): + """Test the to_cpp method.""" + actual = 
self.constructor.to_cpp()
+        self.assertEqual(actual, "Class")
+
+
+class TestInstantiatedMethod(unittest.TestCase):
+    """Tests for the InstantiatedMethod class."""
+    def setUp(self):
+        method = Method.rule.parseString("""
+            template<U>
+            double method(const U& param);
+        """)[0]
+        instantiations = [Typename.rule.parseString("double")[0]]
+        self.method = InstantiatedMethod(method, instantiations)
+
+    def test_constructor(self):
+        """Test constructor."""
+        self.assertIsInstance(self.method, InstantiatedMethod)
+        self.assertIsInstance(self.method.original, Method)
+        self.assertEqual(self.method.name, "methodDouble")
+
+    def test_construct(self):
+        """Test the construct classmethod."""
+        method = Method.rule.parseString("""
+            template<U>
+            T method(U& param);
+        """)[0]
+        method_instantiations = [Typename.rule.parseString("double")[0]]
+        c = Class.rule.parseString("""
+            template<T>
+            class Class {};
+        """)[0]
+        class_instantiations = [Typename.rule.parseString("string")[0]]
+
+        typenames = ['T', 'U']
+        parent = InstantiatedClass(c, class_instantiations)
+        instantiated_args = instantiate_args_list(
+            method.args.list(),
+            typenames, class_instantiations + method_instantiations,
+            parent.cpp_typename())
+
+        instantiated_method = InstantiatedMethod.construct(
+            method, typenames, class_instantiations, method_instantiations,
+            instantiated_args, parent)
+        self.assertEqual(instantiated_method.name, "methodDouble")
+        self.assertEqual(
+            instantiated_method.args.list()[0].ctype.get_typename(), "double")
+        self.assertEqual(instantiated_method.return_type.type1.get_typename(),
+                         "string")
+
+    def test_to_cpp(self):
+        """Test the to_cpp method."""
+        actual = self.method.to_cpp()
+        self.assertEqual(actual, "method")
+
+
+class TestInstantiatedStaticMethod(unittest.TestCase):
+    """Tests for the InstantiatedStaticMethod class."""
+    def setUp(self):
+        static_method = StaticMethod.rule.parseString("""
+            template<U>
+            static T staticMethod(const U& param);
+        """)[0]
+        instantiations = [Typename.rule.parseString("double")[0]]
+        self.static_method = InstantiatedStaticMethod(static_method,
+                                                      instantiations)
+
+    def test_constructor(self):
+        """Test constructor."""
+        self.assertIsInstance(self.static_method, InstantiatedStaticMethod)
+        self.assertIsInstance(self.static_method.original, StaticMethod)
+        self.assertEqual(self.static_method.name, "staticMethodDouble")
+
+    def test_construct(self):
+        """Test the construct classmethod."""
+        static_method = StaticMethod.rule.parseString("""
+            template<U>
+            static T staticMethod(U& param);
+        """)[0]
+        method_instantiations = [Typename.rule.parseString("double")[0]]
+        c = Class.rule.parseString("""
+            template<T>
+            class Class {};
+        """)[0]
+        class_instantiations = [Typename.rule.parseString("string")[0]]
+
+        typenames = ['T', 'U']
+        parent = InstantiatedClass(c, class_instantiations)
+        instantiated_args = instantiate_args_list(
+            static_method.args.list(),
+            typenames, class_instantiations + method_instantiations,
+            parent.cpp_typename())
+
+        instantiated_static_method = InstantiatedStaticMethod.construct(
+            static_method, typenames, class_instantiations,
+            method_instantiations, instantiated_args, parent)
+        self.assertEqual(instantiated_static_method.name, "staticMethodDouble")
+        self.assertEqual(
+            instantiated_static_method.args.list()[0].ctype.get_typename(),
+            "double")
+        self.assertEqual(
+            instantiated_static_method.return_type.type1.get_typename(),
+            "string")
+
+    def test_to_cpp(self):
+        """Test the to_cpp method."""
+        actual = self.static_method.to_cpp()
+        self.assertEqual(actual, "staticMethod")
+
+
+class TestInstantiatedClass(unittest.TestCase):
+    """Tests for the InstantiatedClass class."""
+    def setUp(self):
+        cl = Class.rule.parseString("""
+            template<T>
+            class Foo {
+                template<C = {int}>
+                Foo(C& c);
+
+                template<S = {char}>
+                static T staticMethod(const S& s);
+
+                template<M = {double}>
+                T method(const M& m);
+
+                T operator*(T other) const;
+
+                T prop;
+            };
+        """)[0]
+        class_instantiations = [Typename.rule.parseString('string')[0]]
+        self.member_instantiations = [
+            Typename.rule.parseString('int')[0],
+            Typename.rule.parseString('char')[0],
+            Typename.rule.parseString('double')[0],
+        ]
+        self.cl = InstantiatedClass(cl, class_instantiations)
+        self.typenames = self.cl.original.template.typenames
+
+    def test_constructor(self):
+        """Test constructor."""
+        self.assertIsInstance(self.cl, InstantiatedClass)
+        self.assertIsInstance(self.cl.original, Class)
+        self.assertEqual(self.cl.name, "FooString")
+
+    def test_instantiate_ctors(self):
+        """Test instantiate_ctors method."""
+        ctors = self.cl.instantiate_ctors(self.typenames)
+        self.assertEqual(len(ctors), 1)
+        self.assertEqual(ctors[0].name, "FooString")
+        self.assertEqual(ctors[0].args.list()[0].ctype.get_typename(), "int")
+
+    def test_instantiate_static_methods(self):
+        """Test instantiate_static_methods method."""
+        static_methods = self.cl.instantiate_static_methods(self.typenames)
+        self.assertEqual(len(static_methods), 1)
+        self.assertEqual(static_methods[0].name, "staticMethodChar")
+        self.assertEqual(static_methods[0].args.list()[0].ctype.get_typename(),
+                         "char")
+        self.assertEqual(static_methods[0].return_type.type1.get_typename(),
+                         "string")
+
+    def test_instantiate_methods(self):
+        """Test instantiate_methods method."""
+        methods = self.cl.instantiate_methods(self.typenames)
+        self.assertEqual(len(methods), 1)
+        self.assertEqual(methods[0].name, "methodDouble")
+        self.assertEqual(methods[0].args.list()[0].ctype.get_typename(),
+                         "double")
+        self.assertEqual(methods[0].return_type.type1.get_typename(), "string")
+
+    def test_instantiate_operators(self):
+        """Test instantiate_operators method."""
+        operators = self.cl.instantiate_operators(self.typenames)
+        self.assertEqual(len(operators), 1)
+        self.assertEqual(operators[0].operator, "*")
+        self.assertEqual(operators[0].args.list()[0].ctype.get_typename(),
+                         "string")
+        self.assertEqual(operators[0].return_type.type1.get_typename(),
+                         "string")
+
+    def test_instantiate_properties(self):
+        """Test instantiate_properties method."""
+        properties = self.cl.instantiate_properties(self.typenames)
+        self.assertEqual(len(properties), 1)
+        self.assertEqual(properties[0].name, "prop")
+        self.assertEqual(properties[0].ctype.get_typename(), "string")
+
+    def test_cpp_typename(self):
+        """Test cpp_typename method."""
+        actual = self.cl.cpp_typename()
+        self.assertEqual(actual.name, "Foo")
+
+    def test_to_cpp(self):
+        """Test to_cpp method."""
+        actual = self.cl.to_cpp()
+        self.assertEqual(actual, "Foo")
+
+
+class TestInstantiatedDeclaration(unittest.TestCase):
+    """Tests for the InstantiatedDeclaration class."""
+    def setUp(self):
+        #TODO(Varun) Need to support templated class forward declaration.
+        forward_declaration = ForwardDeclaration.rule.parseString("""
+            class FooBar;
+        """)[0]
+        instantiations = [Typename.rule.parseString("double")[0]]
+        self.declaration = InstantiatedDeclaration(
+            forward_declaration, instantiations=instantiations)
+
+    def test_constructor(self):
+        """Test constructor."""
+        self.assertIsInstance(self.declaration, InstantiatedDeclaration)
+        self.assertIsInstance(self.declaration.original, ForwardDeclaration)
+        self.assertEqual(self.declaration.instantiations[0].name, "double")
+
+    def test_to_cpp(self):
+        """Test to_cpp method."""
+        cpp = self.declaration.to_cpp()
+        self.assertEqual(cpp, "FooBar")
+
+
+class TestTemplateInstantiator(unittest.TestCase):
+    """
+    Test overall template instantiation and the functions in the module.
+    """
+    def test_scoped_template(self):
+        """Test is_scoped_template."""
+        # Test if not scoped template.
+        template_typenames = ['T']
+        str_arg_typename = "double"
+        scoped_template, scoped_idx = is_scoped_template(
+            template_typenames, str_arg_typename)
+        self.assertFalse(scoped_template)
+        self.assertEqual(scoped_idx, -1)
+
+        # Check for correct template match.
+        template_typenames = ['T']
+        str_arg_typename = "gtsam::Matrix"
+        scoped_template, scoped_idx = is_scoped_template(
+            template_typenames, str_arg_typename)
+        self.assertFalse(scoped_template)
+        self.assertEqual(scoped_idx, -1)
+
+        # Test scoped template.
+        template_typenames = ['T']
+        str_arg_typename = "T::Value"
+        scoped_template, scoped_idx = is_scoped_template(
+            template_typenames, str_arg_typename)
+        self.assertEqual(scoped_template, "T")
+        self.assertEqual(scoped_idx, 0)
+
+        template_typenames = ['U', 'T']
+        str_arg_typename = "T::Value"
+        scoped_template, scoped_idx = is_scoped_template(
+            template_typenames, str_arg_typename)
+        self.assertEqual(scoped_template, "T")
+        self.assertEqual(scoped_idx, 1)
+
+    def test_instantiate_type(self):
+        """Test for instantiate_type."""
+        arg = Argument.rule.parseString("const T x")[0]
+        template_typenames = ["T"]
+        instantiations = [Typename.rule.parseString("double")[0]]
+        cpp_typename = "ExampleClass"
+        new_type = instantiate_type(arg.ctype,
+                                    template_typenames,
+                                    instantiations=instantiations,
+                                    cpp_typename=cpp_typename,
+                                    instantiated_class=None)
+
+        new_typename = new_type.typename
+        self.assertEqual(new_typename.name, "double")
+        self.assertEqual(new_typename.instantiated_name(), "double")
+
+    def test_instantiate_args_list(self):
+        """Test for instantiate_args_list."""
+        args = ArgumentList.rule.parseString("T x, double y, string z")[0]
+        args_list = args.list()
+        template_typenames = ['T']
+        instantiations = [Typename.rule.parseString("double")[0]]
+        instantiated_args_list = instantiate_args_list(
+            args_list,
+            template_typenames,
+            instantiations,
+            cpp_typename="ExampleClass")
+
+        self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
+                         "double")
+
+        args = ArgumentList.rule.parseString("T x, U y, string z")[0]
+        args_list = args.list()
+        template_typenames = ['T', 'U']
+        instantiations = [
+            Typename.rule.parseString("double")[0],
+            Typename.rule.parseString("Matrix")[0]
+        ]
+        instantiated_args_list = instantiate_args_list(
+            args_list,
+            template_typenames,
+            instantiations,
+            cpp_typename="ExampleClass")
+        self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
+                         "double")
+        self.assertEqual(instantiated_args_list[1].ctype.get_typename(),
+                         "Matrix")
+
+        args = ArgumentList.rule.parseString("T x, U y, T z")[0]
+        args_list = args.list()
+        template_typenames = ['T', 'U']
+        instantiations = [
+            Typename.rule.parseString("double")[0],
+            Typename.rule.parseString("Matrix")[0]
+        ]
+        instantiated_args_list = instantiate_args_list(
+            args_list,
+            template_typenames,
+            instantiations,
+            cpp_typename="ExampleClass")
+        self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
+                         "double")
+        self.assertEqual(instantiated_args_list[1].ctype.get_typename(),
+                         "Matrix")
+        self.assertEqual(instantiated_args_list[2].ctype.get_typename(),
+                         "double")
+
+    def test_instantiate_return_type(self):
+        """Test for instantiate_return_type."""
+        return_type = ReturnType.rule.parseString("T")[0]
+        template_typenames = ['T']
+        instantiations = [Typename.rule.parseString("double")[0]]
+        instantiated_return_type = instantiate_return_type(
+            return_type,
+            template_typenames,
+            instantiations,
+            cpp_typename="ExampleClass")
+
+        self.assertEqual(instantiated_return_type.type1.get_typename(),
+                         "double")
+
+        return_type = ReturnType.rule.parseString("pair<T, U>")[0]
+        template_typenames = ['T', 'U']
+        instantiations = [
+            Typename.rule.parseString("double")[0],
+            Typename.rule.parseString("char")[0],
+        ]
+        instantiated_return_type = instantiate_return_type(
+            return_type,
+            template_typenames,
+            instantiations,
+            cpp_typename="ExampleClass")
+
+        self.assertEqual(instantiated_return_type.type1.get_typename(),
+                         "double")
+        self.assertEqual(instantiated_return_type.type2.get_typename(), "char")
+
+    def test_instantiate_name(self):
+        """Test for instantiate_name."""
+        instantiations = [Typename.rule.parseString("Man")[0]]
+        instantiated_name = instantiate_name("Iron", instantiations)
+        self.assertEqual(instantiated_name, "IronMan")
+
+    def test_instantiate_namespace(self):
+        """Test for instantiate_namespace."""
+        namespace = Namespace.rule.parseString("""
+            namespace gtsam {
+                #include <gtsam/nonlinear/Values.h>
+                template<T = {gtsam::Basis}>
+                class Values {
+                    Values(const T& other);
+
+                    template<V = {Vector, Matrix}>
+                    void insert(size_t j, V x);
+
+                    template<S = {double}>
+                    static S staticMethod(const S& s);
+                };
+            }
+        """)[0]
+        instantiated_namespace = instantiate_namespace(namespace)
+
+        self.assertEqual(instantiated_namespace.name, "gtsam")
+        self.assertEqual(instantiated_namespace.parent, "")
+
+        self.assertEqual(instantiated_namespace.content[0].header,
+                         "gtsam/nonlinear/Values.h")
+        self.assertIsInstance(instantiated_namespace.content[0], Include)
+
+        self.assertEqual(instantiated_namespace.content[1].name, "ValuesBasis")
+        self.assertIsInstance(instantiated_namespace.content[1], Class)
+
+        self.assertIsInstance(instantiated_namespace.content[1].ctors[0],
+                              Constructor)
+        self.assertEqual(instantiated_namespace.content[1].ctors[0].name,
+                         "ValuesBasis")
+
+        self.assertIsInstance(instantiated_namespace.content[1].methods[0],
+                              Method)
+        self.assertIsInstance(instantiated_namespace.content[1].methods[1],
+                              Method)
+        self.assertEqual(instantiated_namespace.content[1].methods[0].name,
+                         "insertVector")
+        self.assertEqual(instantiated_namespace.content[1].methods[1].name,
+                         "insertMatrix")
+
+        self.assertIsInstance(
+            instantiated_namespace.content[1].static_methods[0], StaticMethod)
+        self.assertEqual(
+            instantiated_namespace.content[1].static_methods[0].name,
+            "staticMethodDouble")
+
+
+if __name__ == '__main__':
+    unittest.main()