diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml index 7b13b6646..fa2425e4d 100644 --- a/.github/workflows/build-linux.yml +++ b/.github/workflows/build-linux.yml @@ -20,26 +20,26 @@ jobs: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. name: [ - ubuntu-18.04-gcc-5, - ubuntu-18.04-gcc-9, - ubuntu-18.04-clang-9, + ubuntu-20.04-gcc-7, + ubuntu-20.04-gcc-9, + ubuntu-20.04-clang-9, ] build_type: [Debug, Release] build_unstable: [ON] include: - - name: ubuntu-18.04-gcc-5 - os: ubuntu-18.04 + - name: ubuntu-20.04-gcc-7 + os: ubuntu-20.04 compiler: gcc - version: "5" + version: "7" - - name: ubuntu-18.04-gcc-9 - os: ubuntu-18.04 + - name: ubuntu-20.04-gcc-9 + os: ubuntu-20.04 compiler: gcc version: "9" - - name: ubuntu-18.04-clang-9 - os: ubuntu-18.04 + - name: ubuntu-20.04-clang-9 + os: ubuntu-20.04 compiler: clang version: "9" @@ -60,9 +60,9 @@ jobs: gpg -a --export $LLVM_KEY | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi - sudo apt-get -y update - sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev + sudo apt-get -y update + sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml index 1e1519da6..4eb861ecc 100644 --- a/.github/workflows/build-python.yml +++ b/.github/workflows/build-python.yml @@ -19,34 +19,34 @@ jobs: # Github Actions requires a single row to be added to the build matrix. # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
name: [ - ubuntu-18.04-gcc-5, - ubuntu-18.04-gcc-9, - ubuntu-18.04-clang-9, + ubuntu-20.04-gcc-7, + ubuntu-20.04-gcc-9, + ubuntu-20.04-clang-9, macOS-11-xcode-13.4.1, - ubuntu-18.04-gcc-5-tbb, + ubuntu-20.04-gcc-7-tbb, ] build_type: [Debug, Release] python_version: [3] include: - - name: ubuntu-18.04-gcc-5 - os: ubuntu-18.04 + - name: ubuntu-20.04-gcc-7 + os: ubuntu-20.04 compiler: gcc - version: "5" + version: "7" - - name: ubuntu-18.04-gcc-9 - os: ubuntu-18.04 + - name: ubuntu-20.04-gcc-9 + os: ubuntu-20.04 compiler: gcc version: "9" - - name: ubuntu-18.04-clang-9 - os: ubuntu-18.04 + - name: ubuntu-20.04-clang-9 + os: ubuntu-20.04 compiler: clang version: "9" # NOTE temporarily added this as it is a required check. - - name: ubuntu-18.04-clang-9 - os: ubuntu-18.04 + - name: ubuntu-20.04-clang-9 + os: ubuntu-20.04 compiler: clang version: "9" build_type: Debug @@ -57,10 +57,10 @@ jobs: compiler: xcode version: "13.4.1" - - name: ubuntu-18.04-gcc-5-tbb - os: ubuntu-18.04 + - name: ubuntu-20.04-gcc-7-tbb + os: ubuntu-20.04 compiler: gcc - version: "5" + version: "7" flag: tbb steps: @@ -79,9 +79,9 @@ jobs: gpg -a --export $LLVM_KEY | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi + sudo apt-get -y update - - sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml index d357b9a34..ef7d7723d 100644 --- a/.github/workflows/build-special.yml +++ b/.github/workflows/build-special.yml @@ -32,31 +32,31 @@ jobs: include: - name: ubuntu-gcc-deprecated - os: ubuntu-18.04 + os: ubuntu-20.04 compiler: gcc version: "9" flag: deprecated - name: 
ubuntu-gcc-quaternions - os: ubuntu-18.04 + os: ubuntu-20.04 compiler: gcc version: "9" flag: quaternions - name: ubuntu-gcc-tbb - os: ubuntu-18.04 + os: ubuntu-20.04 compiler: gcc version: "9" flag: tbb - name: ubuntu-cayleymap - os: ubuntu-18.04 + os: ubuntu-20.04 compiler: gcc version: "9" flag: cayley - name: ubuntu-system-libs - os: ubuntu-18.04 + os: ubuntu-20.04 compiler: gcc version: "9" flag: system-libs @@ -74,9 +74,9 @@ jobs: gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" fi - sudo apt-get -y update - sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev + sudo apt-get -y update + sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev if [ "${{ matrix.compiler }}" = "gcc" ]; then sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib diff --git a/README.md b/README.md index 52ac0a5d8..b32ce70e0 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,29 @@ GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionali We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details. 
+## Citation + +If you are using GTSAM for academic work, please use the following citation: + +``` +@software{gtsam, + author = {Frank Dellaert and Richard Roberts and Varun Agrawal and Alex Cunningham and Chris Beall and Duy-Nguyen Ta and Fan Jiang and lucacarlone and nikai and Jose Luis Blanco-Claraco and Stephen Williams and ydjian and John Lambert and Andy Melim and Zhaoyang Lv and Akshay Krishnan and Jing Dong and Gerry Chen and Krunal Chande and balderdash-devil and DiffDecisionTrees and Sungtae An and mpaluri and Ellon Paiva Mendes and Mike Bosse and Akash Patel and Ayush Baid and Paul Furgale and matthewbroadwaynavenio and roderick-koehle}, + title = {borglab/gtsam}, + month = may, + year = 2022, + publisher = {Zenodo}, + version = {4.2a7}, + doi = {10.5281/zenodo.5794541}, + url = {https://doi.org/10.5281/zenodo.5794541} +} +``` + +You can also get the latest citation available from Zenodo below: + +[![DOI](https://zenodo.org/badge/86362856.svg)](https://doi.org/10.5281/zenodo.5794541) + +Specific formats are available in the bottom-right corner of the Zenodo page. + ## The Preintegrated IMU Factor GTSAM includes a state of the art IMU handling scheme based on @@ -73,7 +96,7 @@ GTSAM includes a state of the art IMU handling scheme based on Our implementation improves on this using integration on the manifold, as detailed in - Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, _"Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors"_, Int. Conf. on Robotics and Automation (ICRA), 2014. [[link]](https://ieeexplore.ieee.org/abstract/document/6907483) -- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015. 
[[link]](http://www.roboticsproceedings.org/rss11/p06.pdf) +- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, _"IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation"_, Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf) If you are using the factor in academic work, please cite the publications above. diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index 9058807ad..e63fbf1dd 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -190,7 +190,7 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") endif() if (NOT MSVC) - option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON) + option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF) if(GTSAM_BUILD_WITH_MARCH_NATIVE AND (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")) # Add as public flag so all dependant projects also use it, as required # by Eigen to avid crashes due to SIMD vectorization: diff --git a/cmake/HandleEigen.cmake b/cmake/HandleEigen.cmake index b21d16885..c49eb4f8e 100644 --- a/cmake/HandleEigen.cmake +++ b/cmake/HandleEigen.cmake @@ -1,7 +1,10 @@ ############################################################################### # Option for using system Eigen or GTSAM-bundled Eigen - -option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) +# Default: Use system's Eigen if found automatically: +find_package(Eigen3 QUIET) +set(USE_SYSTEM_EIGEN_INITIAL_VALUE ${Eigen3_FOUND}) +option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. 
If 'off', use the one bundled with GTSAM" ${USE_SYSTEM_EIGEN_INITIAL_VALUE}) +unset(USE_SYSTEM_EIGEN_INITIAL_VALUE) if(NOT GTSAM_USE_SYSTEM_EIGEN) # This option only makes sense if using the embedded copy of Eigen, it is @@ -11,7 +14,7 @@ endif() # Switch for using system Eigen or GTSAM-bundled Eigen if(GTSAM_USE_SYSTEM_EIGEN) - find_package(Eigen3 REQUIRED) + find_package(Eigen3 REQUIRED) # need to find again as REQUIRED # Use generic Eigen include paths e.g. set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") diff --git a/doc/ImuFactor.lyx b/doc/ImuFactor.lyx index 0922a3e9c..c335e6949 100644 --- a/doc/ImuFactor.lyx +++ b/doc/ImuFactor.lyx @@ -1,7 +1,9 @@ -#LyX 2.0 created this file. For more info see http://www.lyx.org/ -\lyxformat 413 +#LyX 2.3 created this file. For more info see http://www.lyx.org/ +\lyxformat 544 \begin_document \begin_header +\save_transient_properties true +\origin unavailable \textclass article \use_default_options true \maintain_unincluded_children false @@ -9,16 +11,18 @@ \language_package default \inputencoding auto \fontencoding global -\font_roman default -\font_sans default -\font_typewriter default +\font_roman "default" "default" +\font_sans "default" "default" +\font_typewriter "default" "default" +\font_math "auto" "auto" \font_default_family default \use_non_tex_fonts false \font_sc false \font_osf false -\font_sf_scale 100 -\font_tt_scale 100 - +\font_sf_scale 100 100 +\font_tt_scale 100 100 +\use_microtype false +\use_dash_ligatures true \graphics default \default_output_format default \output_sync 0 @@ -29,16 +33,26 @@ \use_hyperref false \papersize default \use_geometry true -\use_amsmath 1 -\use_esint 1 -\use_mhchem 1 -\use_mathdots 1 +\use_package amsmath 1 +\use_package amssymb 1 +\use_package cancel 1 +\use_package esint 1 +\use_package mathdots 1 +\use_package mathtools 1 +\use_package mhchem 1 +\use_package stackrel 1 +\use_package stmaryrd 1 +\use_package undertilde 1 \cite_engine basic +\cite_engine_type 
default +\biblio_style plain \use_bibtopic false \use_indices false \paperorientation portrait \suppress_date false +\justification true \use_refstyle 1 +\use_minted 0 \index Index \shortcut idx \color #008000 @@ -51,7 +65,10 @@ \tocdepth 3 \paragraph_separation indent \paragraph_indentation default -\quotes_language english +\is_math_indent 0 +\math_numbering_side default +\quotes_style english +\dynamic_quotes 0 \papercolumns 1 \papersides 1 \paperpagestyle default @@ -65,11 +82,11 @@ \begin_body \begin_layout Title -The new IMU Factor +The New IMU Factor \end_layout \begin_layout Author -Frank Dellaert +Frank Dellaert & Varun Agrawal \end_layout \begin_layout Standard @@ -91,6 +108,282 @@ filename "macros.lyx" \end_layout +\begin_layout Standard +\begin_inset FormulaMacro +\newcommand{\Rnine}{\mathfrak{\mathbb{R}^{9}}} +{\mathfrak{\mathbb{R}^{9}}} +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset FormulaMacro +\newcommand{\Rninethree}{\mathfrak{\mathbb{R}^{9\times3}}} +{\mathfrak{\mathbb{R}^{9\times3}}} +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset FormulaMacro +\newcommand{\Rninesix}{\mathfrak{\mathbb{R}^{9\times6}}} +{\mathfrak{\mathbb{R}^{9\times6}}} +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset FormulaMacro +\newcommand{\Rninenine}{\mathfrak{\mathbb{R}^{9\times9}}} +{\mathfrak{\mathbb{R}^{9\times9}}} +\end_inset + + +\end_layout + +\begin_layout Subsubsection* +IMU Factor +\end_layout + +\begin_layout Standard +The IMU factor has 2 variants: +\end_layout + +\begin_layout Enumerate +ImuFactor is a 5-way factor between the previous pose and velocity, the + current pose and velocity, and the current IMU bias. +\end_layout + +\begin_layout Enumerate +ImuFactor2 is a 3-way factor between the previous NavState, the current + NavState and the current IMU bias. 
+\end_layout + +\begin_layout Standard +Both variants take a PreintegratedMeasurements object which encodes all + the IMU measurements between the previous timestep and the current timestep. +\end_layout + +\begin_layout Standard +There are also 2 variants of this class: +\end_layout + +\begin_layout Enumerate +Manifold Preintegration: This version keeps track of the incremental NavState + +\begin_inset Formula $\Delta X_{ij}$ +\end_inset + + with respect to the previous NavState, on the NavState manifold itself. + It also keeps track of the +\begin_inset Formula $\Rninesix$ +\end_inset + + Jacobian of +\begin_inset Formula $\Delta X_{ij}$ +\end_inset + + w.r.t. + the bias. + This corresponds to Forster et. + al. +\begin_inset CommandInset citation +LatexCommand cite +key "Forster15rss" +literal "false" + +\end_inset + + +\end_layout + +\begin_layout Enumerate +Tangent Preintegration: This version keeps track of the incremental NavState + in the NavState tangent space instead. + This is a +\begin_inset Formula $\Rnine$ +\end_inset + + vector +\emph on +preintegrated_ +\emph default +. + It also keeps track of the +\begin_inset Formula $\Rninesix$ +\end_inset + + jacobian of the +\emph on +preintegrated_ +\emph default + w.r.t. + the bias. + +\end_layout + +\begin_layout Standard +The main function of a factor is to calculate an error. 
+ This is done exactly the same in both variants: +\begin_inset Formula +\begin{equation} +e(X_{i},X_{j})=X_{j}\ominus\widehat{X_{j}}\label{eq:imu-factor-error} +\end{equation} + +\end_inset + +where the predicted NavState +\begin_inset Formula $\widehat{X_{j}}$ +\end_inset + + at time +\begin_inset Formula $t_{j}$ +\end_inset + + is a function of the NavState +\begin_inset Formula $X_{i}$ +\end_inset + + at time +\begin_inset Formula $t_{i}$ +\end_inset + + and the preintegrated measurements +\begin_inset Formula $PIM$ +\end_inset + +: +\begin_inset Formula +\[ +\widehat{X_{j}}=f(X_{i},PIM) +\] + +\end_inset + +The noise model associated with this factor is assumed to be zero-mean Gaussian + with a +\begin_inset Formula $9\times9$ +\end_inset + + covariance matrix +\begin_inset Formula $\Sigma_{ij}$ +\end_inset + +, which is defined in the tangent space +\begin_inset Formula $T_{X_{j}}\mathcal{N}$ +\end_inset + + of the NavState manifold at the NavState +\begin_inset Formula $X_{j}$ +\end_inset + +. + This (discrete-time) covariance matrix is computed in the preintegrated + measurement class, of which there are two variants as discussed above. +\end_layout + +\begin_layout Subsubsection* +Combined IMU Factor +\end_layout + +\begin_layout Standard +The IMU factor above requires that bias drift over time be modeled as a + separate stochastic process (using a BetweenFactor for example), a crucial + aspect given that the preintegrated measurements depend on these bias values + and are thus correlated. + For this reason, we provide another type of IMU factor which we term the + Combined IMU Factor. + This factor similarly has 2 variants: +\end_layout + +\begin_layout Enumerate +CombinedImuFactor is a 6-way factor between the previous pose, velocity + and IMU bias and the current pose, velocity and IMU bias. +\end_layout + +\begin_layout Enumerate +CombinedImuFactor2 is a 4-way factor between the previous NavState and IMU + bias and the current NavState and IMU bias. 
+\end_layout + +\begin_layout Standard +Since the Combined IMU Factor has a larger state variable due to the inclusion + of IMU biases, the noise model associated with this factor is assumed to + be a zero mean Gaussian with a +\begin_inset Formula $15\times15$ +\end_inset + + covariance matrix +\begin_inset Formula $\Sigma$ +\end_inset + +, similarly defined on the tangent space of the NavState manifold. +\end_layout + +\begin_layout Subsubsection* +Covariance Matrices +\end_layout + +\begin_layout Standard +For IMU preintegration, it is important to propagate the uncertainty accurately + as well. + As such, we detail the various covariance matrices used in the preintegration + step. +\end_layout + +\begin_layout Itemize +Gyroscope Covariance +\begin_inset Formula $Q_{\omega}$ +\end_inset + +: Measurement uncertainty of the gyroscope. +\end_layout + +\begin_layout Itemize +Gyroscope Bias Covariance +\begin_inset Formula $Q_{\Delta b^{\omega}}$ +\end_inset + + : The covariance associated with the gyroscope bias random walk. +\end_layout + +\begin_layout Itemize +Accelerometer Covariance +\begin_inset Formula $Q_{acc}$ +\end_inset + + : Measurement uncertainty of the accelerometer. +\end_layout + +\begin_layout Itemize +Accelerometer Bias Covariance +\begin_inset Formula $Q_{\Delta b^{acc}}$ +\end_inset + + : The covariance associated with the accelerometer bias random walk. +\end_layout + +\begin_layout Itemize +Integration Covariance +\begin_inset Formula $Q_{int}$ +\end_inset + + : This is the uncertainty due to modeling errors in the integration from + acceleration to velocity and position. +\end_layout + +\begin_layout Itemize +Initial Bias Estimate Covariance +\begin_inset Formula $Q_{init}$ +\end_inset + + : This is the uncertainty associated with the estimation of the bias (since + we jointly estimate the bias as well). 
+\end_layout + \begin_layout Subsubsection* Navigation States \end_layout @@ -244,7 +537,7 @@ X(t)=\left\{ R_{0},P_{0}+V_{0}t,V_{0}\right\} then the differential equation describing the trajectory is \begin_inset Formula \[ -\dot{X}(t)=\left[0_{3x3},V_{0},0_{3x1}\right],\,\,\,\,\, X(0)=\left\{ R_{0},P_{0},V_{0}\right\} +\dot{X}(t)=\left[0_{3x3},V_{0},0_{3x1}\right],\,\,\,\,\,X(0)=\left\{ R_{0},P_{0},V_{0}\right\} \] \end_inset @@ -285,7 +578,15 @@ acceleration \end_inset in the body frame. - We know (from Murray84book) that the derivative of + We know (from +\begin_inset CommandInset citation +LatexCommand cite +key "Murray94book" +literal "false" + +\end_inset + +) that the derivative of \begin_inset Formula $R$ \end_inset @@ -592,6 +893,7 @@ Lie Group Methods \begin_inset CommandInset citation LatexCommand cite key "Iserles00an" +literal "true" \end_inset @@ -602,7 +904,7 @@ key "Iserles00an" , \begin_inset Formula \begin{equation} -\dot{R}(t)=F(R,t),\,\,\,\, R(0)=R_{0}\label{eq:diffSo3} +\dot{R}(t)=F(R,t),\,\,\,\,R(0)=R_{0}\label{eq:diffSo3} \end{equation} \end_inset @@ -707,15 +1009,6 @@ In other words, the vector field Retractions \end_layout -\begin_layout Standard -\begin_inset FormulaMacro -\newcommand{\Rnine}{\mathfrak{\mathbb{R}^{9}}} -{\mathfrak{\mathbb{R}^{9}}} -\end_inset - - -\end_layout - \begin_layout Standard Note that the use of the exponential map in local coordinate mappings is not obligatory, even in the context of Lie groups. 
@@ -947,8 +1240,8 @@ Or, as another way to state this, if we solve the differential equations \begin_inset Formula \begin{eqnarray*} \dot{\theta}(t) & = & H(\theta)^{-1}\,\omega^{b}(t)\\ -\dot{p}(t) & = & R_{0}^{T}\, V_{0}+v(t)\\ -\dot{v}(t) & = & R_{0}^{T}\, g+R_{b}^{0}(t)a^{b}(t) +\dot{p}(t) & = & R_{0}^{T}\,V_{0}+v(t)\\ +\dot{v}(t) & = & R_{0}^{T}\,g+R_{b}^{0}(t)a^{b}(t) \end{eqnarray*} \end_inset @@ -1000,7 +1293,15 @@ In the IMU factor, we need to predict the NavState needs to be known in order to compensate properly for the initial velocity and rotated gravity vector. - Hence, the idea of Lupton was to split up + Hence, the idea of Lupton +\begin_inset CommandInset citation +LatexCommand cite +key "Lupton12tro" +literal "false" + +\end_inset + + was to split up \begin_inset Formula $v(t)$ \end_inset @@ -1015,7 +1316,7 @@ v(t)=v_{g}(t)+v_{a}(t) evolving as \begin_inset Formula \begin{eqnarray*} -\dot{v}_{g}(t) & = & R_{i}^{T}\, g\\ +\dot{v}_{g}(t) & = & R_{i}^{T}\,g\\ \dot{v}_{a}(t) & = & R_{b}^{i}(t)a^{b}(t) \end{eqnarray*} @@ -1041,7 +1342,7 @@ p(t)=p_{i}(t)+p_{g}(t)+p_{v}(t) evolving as \begin_inset Formula \begin{eqnarray*} -\dot{p}_{i}(t) & = & R_{i}^{T}\, V_{i}\\ +\dot{p}_{i}(t) & = & R_{i}^{T}\,V_{i}\\ \dot{p}_{g}(t) & = & v_{g}(t)=R_{i}^{T}gt\\ \dot{p}_{v}(t) & = & v_{a}(t) \end{eqnarray*} @@ -1057,8 +1358,11 @@ p_{g}(t) & = & R_{i}^{T}\frac{gt^{2}}{2} \end_inset -The recipe for the IMU factor is then, in summary. - Solve the ordinary differential equations +The recipe for the IMU factor is then, in summary: +\end_layout + +\begin_layout Enumerate +Solve the ordinary differential equations \begin_inset Formula \begin{eqnarray*} \dot{\theta}(t) & = & H(\theta(t))^{-1}\,\omega^{b}(t)\\ @@ -1077,7 +1381,10 @@ starting from zero, up to time \end_inset at all times. 
- Form the local coordinate vector as +\end_layout + +\begin_layout Enumerate +Form the local coordinate vector as \begin_inset Formula \[ \zeta(t_{ij})=\left[\theta(t_{ij}),p(t_{ij}),v(t_{ij})\right]=\left[\theta(t_{ij}),R_{i}^{T}V_{i}t_{ij}+R_{i}^{T}\frac{gt_{ij}^{2}}{2}+p_{v}(t_{ij}),R_{i}^{T}gt_{ij}+v_{a}(t_{ij})\right] @@ -1085,6 +1392,10 @@ starting from zero, up to time \end_inset + +\end_layout + +\begin_layout Enumerate Predict the NavState \begin_inset Formula $X_{j}$ \end_inset @@ -1096,7 +1407,7 @@ Predict the NavState from \begin_inset Formula \[ -X_{j}=\mathcal{R}_{X_{i}}(\zeta(t_{ij}))=\left\{ \Phi_{R_{0}}\left(\theta(t_{ij})\right),P_{i}+V_{i}t_{ij}+\frac{gt_{ij}^{2}}{2}+R_{i}\, p_{v}(t_{ij}),V_{i}+gt_{ij}+R_{i}\, v_{a}(t_{ij})\right\} +X_{j}=\mathcal{R}_{X_{i}}(\zeta(t_{ij}))=\left\{ \Phi_{R_{0}}\left(\theta(t_{ij})\right),P_{i}+V_{i}t_{ij}+\frac{gt_{ij}^{2}}{2}+R_{i}\,p_{v}(t_{ij}),V_{i}+gt_{ij}+R_{i}\,v_{a}(t_{ij})\right\} \] \end_inset @@ -1179,11 +1490,59 @@ where we defined the rotation matrix \end_layout \begin_layout Subsubsection* -Noise Propagation +Noise Modeling \end_layout \begin_layout Standard -Even when we assume uncorrelated noise on +Given the above solutions to the differential equations, we add noise modeling + to account for the various sources of error in the system +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{eqnarray} +\theta_{k+1} & = & \theta_{k}+H(\theta_{k})^{-1}\,(\omega_{k}^{b}+\epsilon_{k}^{\omega}-b_{k}^{\omega}-\epsilon_{init}^{\omega})\Delta_{t}\nonumber \\ +p_{k+1} & = & p_{k}+v_{k}\Delta_{t}+R_{k}(a_{k}^{b}+\epsilon_{k}^{a}-b_{k}^{a}-\epsilon_{init}^{a})\frac{\Delta_{t}^{2}}{2}+\epsilon_{k}^{int}\label{eq:preintegration}\\ +v_{k+1} & = & v_{k}+R_{k}(a_{k}^{b}+\epsilon_{k}^{a}-b_{k}^{a}-\epsilon_{init}^{a})\Delta_{t}\nonumber \\ +b_{k+1}^{a} & = & b_{k}^{a}+\epsilon_{k}^{b^{a}}\nonumber \\ +b_{k+1}^{\omega} & = & b_{k}^{\omega}+\epsilon_{k}^{b^{\omega}}\nonumber +\end{eqnarray} + +\end_inset 
+ + +\end_layout + +\begin_layout Standard +which we can write compactly as, +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{eqnarray} +\theta_{k+1} & = & f_{\theta}(\theta_{k},b_{k}^{w},\epsilon_{k}^{\omega},\epsilon_{init}^{b^{\omega}})\label{eq:compact-preintegration}\\ +p_{k+1} & = & f_{p}(p_{k},v_{k},\theta_{k},b_{k}^{a},\epsilon_{k}^{a},\epsilon_{init}^{a},\epsilon_{k}^{int})\nonumber \\ +v_{k+1} & = & f_{v}(v_{k,}\theta_{k,}b_{k}^{a},\epsilon_{k}^{a},\epsilon_{init}^{a})\nonumber \\ +b_{k+1}^{a} & = & f_{b^{a}}(b_{k}^{a},\epsilon_{k}^{b^{a}})\nonumber \\ +b_{k+1}^{\omega} & = & f_{b^{\omega}}(b_{k}^{\omega},\epsilon_{k}^{b^{\omega}})\nonumber +\end{eqnarray} + +\end_inset + + +\end_layout + +\begin_layout Subsubsection* +Noise Propagation in IMU Factor +\end_layout + +\begin_layout Standard +We wish to compute the ImuFactor covariance matrix +\begin_inset Formula $\Sigma_{ij}$ +\end_inset + +. + Even when we assume uncorrelated noise on \begin_inset Formula $\omega^{b}$ \end_inset @@ -1201,11 +1560,12 @@ Even when we assume uncorrelated noise on \end_inset appear in multiple places. - To model the noise propagation, let us define + To model the noise propagation, let us define the preintegrated navigation + state \begin_inset Formula $\zeta_{k}=[\theta_{k},p_{k},v_{k}]$ \end_inset - and rewrite Eqns. +, as a 9D vector on tangent space at and rewrite Eqns. ( \begin_inset CommandInset ref LatexCommand ref @@ -1239,7 +1599,7 @@ Then the noise on propagates as \begin_inset Formula \begin{equation} -\Sigma_{k+1}=A_{k}\Sigma_{k}A_{k}^{T}+B_{k}\Sigma_{\eta}^{ad}B_{k}+C_{k}\Sigma_{\eta}^{gd}C_{k}\label{eq:prop} +\Sigma_{k+1}=A_{k}\Sigma_{k}A_{k}^{T}+B_{k}\Sigma_{\eta}^{ad}B_{k}^{T}+C_{k}\Sigma_{\eta}^{gd}C_{k}^{T}\label{eq:prop} \end{equation} \end_inset @@ -1280,6 +1640,42 @@ where \begin_inset Formula $\omega^{b}$ \end_inset +. 
+ Note that +\begin_inset Formula $\Sigma_{k},$ +\end_inset + + +\begin_inset Formula $\Sigma_{\eta}^{ad}$ +\end_inset + +, and +\begin_inset Formula $\Sigma_{\eta}^{gd}$ +\end_inset + + are discrete time covariances with +\begin_inset Formula $\Sigma_{\eta}^{ad}$ +\end_inset + +, and +\begin_inset Formula $\Sigma_{\eta}^{gd}$ +\end_inset + +divided by +\begin_inset Formula $\Delta_{t}$ +\end_inset + +. + Please see the section on Covariance Discretization +\begin_inset CommandInset ref +LatexCommand vpageref +reference "subsec:Covariance-Discretization" +plural "false" +caps "false" +noprefix "false" + +\end_inset + . \end_layout @@ -1295,7 +1691,7 @@ We start with the noise propagation on \begin_layout Standard \begin_inset Formula \[ -\deriv{\theta_{k+1}}{\theta_{k}}=I_{3x3}+\deriv{H(\theta_{k})^{-1}\omega_{k}^{b}}{\theta_{k}}\Delta_{t} +\deriv{\theta_{k+1}}{\theta_{k}}=I_{3\times3}+\deriv{H(\theta_{k})^{-1}\omega_{k}^{b}}{\theta_{k}}\Delta_{t} \] \end_inset @@ -1307,7 +1703,7 @@ It can be shown that for small we have \begin_inset Formula \[ -\deriv{H(\theta_{k})^{-1}\omega_{k}^{b}}{\theta_{k}}\approx-\frac{1}{2}\Skew{\omega_{k}^{b}}\mbox{ and hence }\deriv{\theta_{k+1}}{\theta_{k}}=I_{3x3}-\frac{\Delta t}{2}\Skew{\omega_{k}^{b}} +\deriv{H(\theta_{k})^{-1}\omega_{k}^{b}}{\theta_{k}}\approx-\frac{1}{2}\Skew{\omega_{k}^{b}}\mbox{ and hence }\deriv{\theta_{k+1}}{\theta_{k}}=I_{3\times3}-\frac{\Delta_{t}}{2}\Skew{\omega_{k}^{b}} \] \end_inset @@ -1357,9 +1753,9 @@ Putting all this together, we finally obtain \begin_inset Formula \[ A_{k}\approx\left[\begin{array}{ccc} -I_{3\times3}-\frac{\Delta_{t}}{2}\Skew{\omega_{k}^{b}}\\ +I_{3\times3}-\frac{\Delta_{t}}{2}\Skew{\omega_{k}^{b}} & 0_{3\times3} & 0_{3\times3}\\ R_{k}\Skew{-a_{k}^{b}}H(\theta_{k})\frac{\Delta_{t}}{2}^{2} & I_{3\times3} & I_{3\times3}\Delta_{t}\\ -R_{k}\Skew{-a_{k}^{b}}H(\theta_{k})\Delta_{t} & & I_{3\times3} +R_{k}\Skew{-a_{k}^{b}}H(\theta_{k})\Delta_{t} & 0_{3\times3} & I_{3\times3} 
\end{array}\right] \] @@ -1372,7 +1768,7 @@ B_{k}=\left[\begin{array}{c} 0_{3\times3}\\ R_{k}\frac{\Delta_{t}}{2}^{2}\\ R_{k}\Delta_{t} -\end{array}\right],\,\,\,\, C_{k}=\left[\begin{array}{c} +\end{array}\right],\,\,\,\,C_{k}=\left[\begin{array}{c} H(\theta_{k})^{-1}\Delta_{t}\\ 0_{3\times3}\\ 0_{3\times3} @@ -1384,9 +1780,393 @@ H(\theta_{k})^{-1}\Delta_{t}\\ \end_layout +\begin_layout Subsubsection* +Noise Propagation in Combined IMU Factor +\end_layout + +\begin_layout Standard +We can similarly account for bias drift over time, as is commonly seen in + commercial grade IMUs. +\end_layout + +\begin_layout Standard +We expand the state vector as +\begin_inset Formula $\zeta_{k}=[\theta_{k},p_{k},v_{k},b_{k}^{a},b_{k}^{\omega}]$ +\end_inset + + to include the bias terms and define the augmented noise vector +\begin_inset Formula $\epsilon=[\epsilon_{k}^{\omega},\epsilon_{k}^{a},\epsilon_{k}^{b^{a}},\epsilon_{k}^{b^{\omega}},\epsilon_{k}^{int},\epsilon_{init}^{b^{a}},\epsilon_{init}^{b^{\omega}}]$ +\end_inset + +. + This gives the noise propagation equation as +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{equation} +\Sigma_{k+1}=F_{k}\Sigma_{k}F_{k}^{T}+G_{k}Q_{k}G_{k}^{T}\label{eq:prop-combined} +\end{equation} + +\end_inset + + +\end_layout + +\begin_layout Standard +where +\begin_inset Formula $F_{k}$ +\end_inset + + is the +\begin_inset Formula $15\times15$ +\end_inset + + derivative of +\begin_inset Formula $f$ +\end_inset + + wrpt this new +\begin_inset Formula $\zeta$ +\end_inset + +, and +\begin_inset Formula $G_{k}$ +\end_inset + + is the +\begin_inset Formula $15\times21$ +\end_inset + + matrix for first order uncertainty propagation. + +\begin_inset Formula $Q_{k}$ +\end_inset + + defines the uncertainty of +\begin_inset Formula $\eta$ +\end_inset + +. 
+ The top-left +\begin_inset Formula $9\times9$ +\end_inset + + of +\begin_inset Formula $F_{k}$ +\end_inset + + is the same as +\begin_inset Formula $A_{k}$ +\end_inset + +, thus we only have the jacobians wrpt the biases left to account for. +\end_layout + +\begin_layout Standard +Conveniently, the jacobians of the pose and velocity wrpt the biases are + already computed in the +\emph on +ImuFactor +\emph default +derivation as matrices +\begin_inset Formula $B_{k}$ +\end_inset + + and +\begin_inset Formula $C_{k}$ +\end_inset + +, while they are identity matrices wrpt the biases themselves. + Thus, we can easily plug-in the values from the previous section to give + us the final result +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +F_{k}\approx\left[\begin{array}{ccccc} +I_{3\times3}-\frac{\Delta_{t}}{2}\Skew{\omega_{k}^{b}} & 0_{3\times3} & 0_{3\times3} & 0_{3\times3} & H(\theta_{k})^{-1}\Delta_{t}\\ +R_{k}\Skew{-a_{k}^{b}}H(\theta_{k})\frac{\Delta_{t}}{2}^{2} & I_{3\times3} & I_{3\times3}\Delta_{t} & R_{k}\frac{\Delta_{t}}{2}^{2} & 0_{3\times3}\\ +R_{k}\Skew{-a_{k}^{b}}H(\theta_{k})\Delta_{t} & 0_{3\times3} & I_{3\times3} & R_{k}\Delta_{t} & 0_{3\times3}\\ +0_{3\times3} & 0_{3\times3} & 0_{3\times3} & I_{3\times3} & 0_{3\times3}\\ +0_{3\times3} & 0_{3\times3} & 0_{3\times3} & 0_{3\times3} & I_{3\times3} +\end{array}\right] +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +Similarly for +\begin_inset Formula $Q_{k},$ +\end_inset + +we get +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +Q_{k}=\left[\begin{array}{ccccccc} +\Sigma^{\omega}\\ + & \Sigma^{a}\\ + & & \Sigma^{b^{a}}\\ + & & & \Sigma^{b^{\omega}}\\ + & & & & \Sigma^{int}\\ + & & & & & \Sigma^{init_{11}} & \Sigma^{init_{12}}\\ + & & & & & \Sigma^{init_{21}} & \Sigma^{init_{22}} +\end{array}\right] +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +and for +\begin_inset Formula $G_{k}$ +\end_inset + + we get +\end_layout + +\begin_layout 
Standard +\begin_inset Formula +\[ +G_{k}=\left[\begin{array}{ccccccc} +\deriv{\theta}{\epsilon^{\omega}} & \deriv{\theta}{\epsilon^{a}} & \deriv{\theta}{\epsilon^{b^{a}}} & \deriv{\theta}{\epsilon^{b^{\omega}}} & \deriv{\theta}{\epsilon^{int}} & \deriv{\theta}{\epsilon_{init}^{b^{a}}} & \deriv{\theta}{\epsilon_{init}^{b^{\omega}}}\\ +\deriv p{\epsilon^{\omega}} & \deriv p{\epsilon^{a}} & \deriv p{\epsilon^{b^{a}}} & \deriv p{\epsilon^{b^{\omega}}} & \deriv p{\epsilon^{int}} & \deriv p{\epsilon_{init}^{b^{a}}} & \deriv p{\epsilon_{init}^{b^{\omega}}}\\ +\deriv v{\epsilon^{\omega}} & \deriv v{\epsilon^{a}} & \deriv v{\epsilon^{b^{a}}} & \deriv v{\epsilon^{b^{\omega}}} & \deriv v{\epsilon^{int}} & \deriv v{\epsilon_{init}^{b^{a}}} & \deriv v{\epsilon_{init}^{b^{\omega}}}\\ +\deriv{b^{a}}{\epsilon^{\omega}} & \deriv{b^{a}}{\epsilon^{a}} & \deriv{b^{a}}{\epsilon^{b^{a}}} & \deriv{b^{a}}{\epsilon^{b^{\omega}}} & \deriv{b^{a}}{\epsilon^{int}} & \deriv{b^{a}}{\epsilon_{init}^{b^{a}}} & \deriv{b^{a}}{\epsilon_{init}^{b^{\omega}}}\\ +\deriv{b^{\omega}}{\epsilon^{\omega}} & \deriv{b^{\omega}}{\epsilon^{a}} & \deriv{b^{\omega}}{\epsilon^{b^{a}}} & \deriv{b^{\omega}}{\epsilon^{b^{\omega}}} & \deriv{b^{\omega}}{\epsilon^{int}} & \deriv{b^{\omega}}{\epsilon_{init}^{b^{a}}} & \deriv{b^{\omega}}{\epsilon_{init}^{b^{\omega}}} +\end{array}\right]=\left[\begin{array}{ccccccc} +\deriv{\theta}{\epsilon^{\omega}} & 0 & 0 & 0 & 0 & 0 & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\\ +0 & \deriv p{\epsilon^{a}} & 0 & 0 & \deriv p{\epsilon^{int}} & \deriv p{\eta_{init}^{b^{a}}} & 0\\ +0 & \deriv v{\epsilon^{a}} & 0 & 0 & 0 & \deriv v{\eta_{init}^{b^{a}}} & 0\\ +0 & 0 & I_{3\times3} & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & I_{3\times3} & 0 & 0 & 0 +\end{array}\right] +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +We can perform the block-wise computation of +\begin_inset Formula $G_{k}Q_{k}G_{k}^{T}$ +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ 
+G_{k}Q_{k}G_{k}^{T}=\left[\begin{array}{ccccccc} +\deriv{\theta}{\epsilon^{\omega}} & 0 & 0 & 0 & 0 & 0 & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\\ +0 & \deriv p{\epsilon^{a}} & 0 & 0 & \deriv p{\epsilon^{int}} & \deriv p{\eta_{init}^{b^{a}}} & 0\\ +0 & \deriv v{\epsilon^{a}} & 0 & 0 & 0 & \deriv v{\eta_{init}^{b^{a}}} & 0\\ +0 & 0 & I_{3\times3} & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & I_{3\times3} & 0 & 0 & 0 +\end{array}\right]\left[\begin{array}{ccccccc} +\Sigma^{\omega}\\ + & \Sigma^{a}\\ + & & \Sigma^{b^{a}}\\ + & & & \Sigma^{b^{\omega}}\\ + & & & & \Sigma^{int}\\ + & & & & & \Sigma^{init_{11}} & \Sigma^{init_{12}}\\ + & & & & & \Sigma^{init_{21}} & \Sigma^{init_{22}} +\end{array}\right]G_{k}^{T} +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset Formula +\[ +G_{k}Q_{k}G_{k}^{T}=\left[\begin{array}{ccccccc} +\deriv{\theta}{\epsilon^{\omega}}\Sigma^{\omega} & 0 & 0 & 0 & 0 & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}} & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{22}}\\ +0 & \deriv p{\epsilon^{a}}\Sigma^{a} & 0 & 0 & \deriv p{\epsilon^{int}}\Sigma^{int} & \deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}} & \deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\\ +0 & \deriv v{\epsilon^{a}}\Sigma^{a} & 0 & 0 & 0 & \deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}} & \deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\\ +0 & 0 & \Sigma^{b^{a}} & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & \Sigma^{b^{\omega}} & 0 & 0 & 0 +\end{array}\right]G_{k}^{T} +\] + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{multline*} +G_{k}Q_{k}G_{k}^{T}=\left[\begin{array}{ccccccc} +\deriv{\theta}{\epsilon^{\omega}}\Sigma^{\omega} & 0 & 0 & 0 & 0 & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}} & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{22}}\\ +0 & \deriv p{\epsilon^{a}}\Sigma^{a} & 0 & 0 & \deriv p{\epsilon^{int}}\Sigma^{int} & \deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}} & \deriv 
p{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\\ +0 & \deriv v{\epsilon^{a}}\Sigma^{a} & 0 & 0 & 0 & \deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}} & \deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\\ +0 & 0 & \Sigma^{b^{a}} & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & \Sigma^{b^{\omega}} & 0 & 0 & 0 +\end{array}\right]\\ +\left[\begin{array}{ccccc} +\deriv{\theta}{\epsilon^{\omega}}^{T} & 0 & 0 & 0 & 0\\ +0 & \deriv p{\epsilon^{a}}^{T} & \deriv v{\epsilon^{a}}^{T} & 0 & 0\\ +0 & 0 & 0 & I_{3\times3} & 0\\ +0 & 0 & 0 & 0 & I_{3\times3}\\ +0 & \deriv p{\epsilon^{int}}^{T} & 0 & 0 & 0\\ +0 & \deriv p{\eta_{init}^{b^{a}}}^{T} & \deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & 0 & 0 & 0 & 0 +\end{array}\right] +\end{multline*} + +\end_inset + + +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{multline*} +=\\ +\left[\begin{array}{ccccc} +\deriv{\theta}{\epsilon^{\omega}}\Sigma^{\omega}\deriv{\theta}{\epsilon^{\omega}}^{T}+\deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{22}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}}\deriv p{\eta_{init}^{b^{a}}}^{T} & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & \deriv p{\epsilon^{a}}\Sigma^{a}\deriv p{\epsilon^{a}}^{T}+\deriv p{\epsilon^{int}}\Sigma^{int}\deriv p{\epsilon^{int}}^{T}\\ + & +\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv p{\eta_{init}^{b^{a}}}^{T} & \deriv p{\epsilon^{a}}\Sigma^{a}\deriv v{\epsilon^{a}}^{T}+\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +\deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & \deriv v{\epsilon^{a}}\Sigma^{a}\deriv p{\epsilon^{a}}^{T}+\deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv p{\eta_{init}^{b^{a}}}^{T} & \deriv 
v{\epsilon^{a}}\Sigma^{a}\deriv v{\epsilon^{a}}^{T}+\deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +0 & 0 & 0 & \Sigma^{b^{a}} & 0\\ +0 & 0 & 0 & 0 & \Sigma^{b^{\omega}} +\end{array}\right] +\end{multline*} + +\end_inset + + +\end_layout + +\begin_layout Standard +which we can break into 3 matrices for clarity, representing the main diagonal + and off-diagonal elements +\end_layout + +\begin_layout Standard +\begin_inset Formula +\begin{multline*} +=\\ +\left[\begin{array}{ccccc} +\deriv{\theta}{\epsilon^{\omega}}\Sigma^{\omega}\deriv{\theta}{\epsilon^{\omega}}^{T} & 0 & 0 & 0 & 0\\ +0 & \deriv p{\epsilon^{a}}\Sigma^{a}\deriv p{\epsilon^{a}}^{T} & 0 & 0 & 0\\ +0 & 0 & \deriv v{\epsilon^{a}}\Sigma^{a}\deriv v{\epsilon^{a}}^{T} & 0 & 0\\ +0 & 0 & 0 & \Sigma^{b^{a}} & 0\\ +0 & 0 & 0 & 0 & \Sigma^{b^{\omega}} +\end{array}\right]+\\ +\left[\begin{array}{ccccc} +\deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{22}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & 0 & 0 & 0 & 0\\ +0 & \deriv p{\epsilon^{int}}\Sigma^{int}\deriv p{\epsilon^{int}}^{T}+\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv p{\eta_{init}^{b^{a}}}^{T} & 0 & 0 & 0\\ +0 & 0 & \deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +0 & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & 0 & 0 +\end{array}\right]+\\ +\left[\begin{array}{ccccc} +0 & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}}\deriv p{\eta_{init}^{b^{a}}}^{T} & \deriv{\theta}{\eta_{init}^{b^{\omega}}}\Sigma^{init_{21}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & 0 & \deriv p{\epsilon^{a}}\Sigma^{a}\deriv v{\epsilon^{a}}^{T}+\deriv p{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv v{\eta_{init}^{b^{a}}}^{T} & 0 & 0\\ +\deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{12}}\deriv{\theta}{\eta_{init}^{b^{\omega}}}^{T} & \deriv v{\epsilon^{a}}\Sigma^{a}\deriv 
p{\epsilon^{a}}^{T}+\deriv v{\eta_{init}^{b^{a}}}\Sigma^{init_{11}}\deriv p{\eta_{init}^{b^{a}}}^{T} & 0 & 0 & 0\\ +0 & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & 0 & 0 +\end{array}\right] +\end{multline*} + +\end_inset + + +\end_layout + +\begin_layout Subsubsection* +Covariance Discretization +\begin_inset CommandInset label +LatexCommand label +name "subsec:Covariance-Discretization" + +\end_inset + + +\end_layout + +\begin_layout Standard +So far, all the covariances are assumed to be continuous since the state + and measurement models are considered to be continuous-time stochastic + processes. + However, we sample measurements in a discrete-time fashion, necessitating + the need to convert the covariances to their discrete time equivalents. +\end_layout + +\begin_layout Standard +The IMU is modeled as a first order Gauss-Markov process, with a measurement + noise and a process noise. + Following +\begin_inset CommandInset citation +LatexCommand cite +after "Alg. 1 Page 57" +key "Nikolic16thesis" +literal "false" + +\end_inset + + and +\begin_inset CommandInset citation +LatexCommand cite +after "Eqns 129-130" +key "Trawny05report_IndirectKF" +literal "false" + +\end_inset + +, the measurement noises +\begin_inset Formula $[\epsilon^{a},\epsilon^{\omega},\epsilon_{init}]$ +\end_inset + + are simply scaled by +\begin_inset Formula $\frac{1}{\Delta t}$ +\end_inset + +, and the process noises +\begin_inset Formula $[\epsilon^{int},\epsilon^{b^{a}},\epsilon^{b^{\omega}}]$ +\end_inset + + are scaled by +\begin_inset Formula $\Delta t$ +\end_inset + + where +\begin_inset Formula $\Delta t$ +\end_inset + + is the time interval between 2 consecutive samples. + For a thorough explanation of the discretization process, please refer + to +\begin_inset CommandInset citation +LatexCommand cite +after "Section 8.1" +key "Simon06book" +literal "false" + +\end_inset + +. 
+\end_layout + \begin_layout Standard \begin_inset CommandInset bibtex LatexCommand bibtex +btprint "btPrintCited" bibfiles "refs" options "plain" diff --git a/doc/ImuFactor.pdf b/doc/ImuFactor.pdf index 0b13c1f59..1823cbc4a 100644 Binary files a/doc/ImuFactor.pdf and b/doc/ImuFactor.pdf differ diff --git a/doc/PreintegratedIMUJacobians.pdf b/doc/PreintegratedIMUJacobians.pdf new file mode 100644 index 000000000..02616ee07 Binary files /dev/null and b/doc/PreintegratedIMUJacobians.pdf differ diff --git a/doc/refs.bib b/doc/refs.bib index 414773483..ec42fb032 100644 --- a/doc/refs.bib +++ b/doc/refs.bib @@ -1,26 +1,72 @@ +%% This BibTeX bibliography file was created using BibDesk. +%% https://bibdesk.sourceforge.io/ + +%% Created for Varun Agrawal at 2021-09-27 17:39:09 -0400 + + +%% Saved with string encoding Unicode (UTF-8) + + + +@article{Lupton12tro, + author = {Lupton, Todd and Sukkarieh, Salah}, + date-added = {2021-09-27 17:38:56 -0400}, + date-modified = {2021-09-27 17:39:09 -0400}, + doi = {10.1109/TRO.2011.2170332}, + journal = {IEEE Transactions on Robotics}, + number = {1}, + pages = {61-76}, + title = {Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions}, + volume = {28}, + year = {2012}, + Bdsk-Url-1 = {https://doi.org/10.1109/TRO.2011.2170332}} + +@inproceedings{Forster15rss, + author = {Christian Forster and Luca Carlone and Frank Dellaert and Davide Scaramuzza}, + booktitle = {Robotics: Science and Systems}, + date-added = {2021-09-26 20:44:41 -0400}, + date-modified = {2021-09-26 20:45:03 -0400}, + title = {IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation}, + year = {2015}} + @article{Iserles00an, - title = {Lie-group methods}, - author = {Iserles, Arieh and Munthe-Kaas, Hans Z and - N{\o}rsett, Syvert P and Zanna, Antonella}, - journal = {Acta Numerica 2000}, - volume = {9}, - pages = {215--365}, - year = {2000}, - publisher = {Cambridge Univ Press} 
-} + author = {Iserles, Arieh and Munthe-Kaas, Hans Z and N{\o}rsett, Syvert P and Zanna, Antonella}, + journal = {Acta Numerica 2000}, + pages = {215--365}, + publisher = {Cambridge Univ Press}, + title = {Lie-group methods}, + volume = {9}, + year = {2000}} @book{Murray94book, - title = {A mathematical introduction to robotic manipulation}, - author = {Murray, Richard M and Li, Zexiang and Sastry, S - Shankar and Sastry, S Shankara}, - year = {1994}, - publisher = {CRC press} -} + author = {Murray, Richard M and Li, Zexiang and Sastry, S Shankar and Sastry, S Shankara}, + publisher = {CRC press}, + title = {A mathematical introduction to robotic manipulation}, + year = {1994}} @book{Spivak65book, - title = {Calculus on manifolds}, - author = {Spivak, Michael}, - volume = {1}, - year = {1965}, - publisher = {WA Benjamin New York} -} \ No newline at end of file + author = {Spivak, Michael}, + publisher = {WA Benjamin New York}, + title = {Calculus on manifolds}, + volume = {1}, + year = {1965}} + +@phdthesis{Nikolic16thesis, + title={Characterisation, calibration, and design of visual-inertial sensor systems for robot navigation}, + author={Nikolic, Janosch}, + year={2016}, + school={ETH Zurich} +} + +@book{Simon06book, + title={Optimal state estimation: Kalman, H infinity, and nonlinear approaches}, + author={Simon, Dan}, + year={2006}, + publisher={John Wiley \& Sons} +} + +@inproceedings{Trawny05report_IndirectKF, + title={Indirect Kalman Filter for 3 D Attitude Estimation}, + author={Nikolas Trawny and Stergios I. 
Roumeliotis}, + year={2005} +} diff --git a/examples/CombinedImuFactorsExample.cpp b/examples/CombinedImuFactorsExample.cpp index 9211a4d5f..e0396ee81 100644 --- a/examples/CombinedImuFactorsExample.cpp +++ b/examples/CombinedImuFactorsExample.cpp @@ -60,13 +60,14 @@ namespace po = boost::program_options; po::variables_map parseOptions(int argc, char* argv[]) { po::options_description desc; - desc.add_options()("help,h", "produce help message")( - "data_csv_path", po::value()->default_value("imuAndGPSdata.csv"), - "path to the CSV file with the IMU data")( - "output_filename", - po::value()->default_value("imuFactorExampleResults.csv"), - "path to the result file to use")("use_isam", po::bool_switch(), - "use ISAM as the optimizer"); + desc.add_options()("help,h", "produce help message") // help message + ("data_csv_path", po::value()->default_value("imuAndGPSdata.csv"), + "path to the CSV file with the IMU data") // path to the data file + ("output_filename", + po::value()->default_value("imuFactorExampleResults.csv"), + "path to the result file to use") // filename to save results to + ("use_isam", po::bool_switch(), + "use ISAM as the optimizer"); // flag for ISAM optimizer po::variables_map vm; po::store(po::parse_command_line(argc, argv, desc), vm); @@ -106,7 +107,7 @@ boost::shared_ptr imuParams() { I_3x3 * 1e-8; // error committed in integrating position from velocities Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); - Matrix66 bias_acc_omega_int = + Matrix66 bias_acc_omega_init = I_6x6 * 1e-5; // error in the bias used for preintegration auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); @@ -122,7 +123,7 @@ boost::shared_ptr imuParams() { // PreintegrationCombinedMeasurements params: p->biasAccCovariance = bias_acc_cov; // acc bias in continuous p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous - p->biasAccOmegaInt = bias_acc_omega_int; + 
p->biasAccOmegaInt = bias_acc_omega_init; return p; } diff --git a/examples/ImuFactorsExample.cpp b/examples/ImuFactorsExample.cpp index 38ee4c7c7..c17631864 100644 --- a/examples/ImuFactorsExample.cpp +++ b/examples/ImuFactorsExample.cpp @@ -94,7 +94,7 @@ boost::shared_ptr imuParams() { I_3x3 * 1e-8; // error committed in integrating position from velocities Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); - Matrix66 bias_acc_omega_int = + Matrix66 bias_acc_omega_init = I_6x6 * 1e-5; // error in the bias used for preintegration auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); @@ -110,7 +110,7 @@ boost::shared_ptr imuParams() { // PreintegrationCombinedMeasurements params: p->biasAccCovariance = bias_acc_cov; // acc bias in continuous p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous - p->biasAccOmegaInt = bias_acc_omega_int; + p->biasAccOmegaInt = bias_acc_omega_init; return p; } diff --git a/examples/README.md b/examples/README.md index 5a72736e0..924a79521 100644 --- a/examples/README.md +++ b/examples/README.md @@ -33,7 +33,6 @@ The following examples illustrate some concepts from Georgia Tech's research pap ## 2D Pose SLAM * **LocalizationExample.cpp**: modeling robot motion -* **LocalizationExample2.cpp**: example with GPS like measurements * **Pose2SLAMExample**: A 2D Pose SLAM example using the predefined typedefs in gtsam/slam/pose2SLAM.h * **Pose2SLAMExample_advanced**: same, but uses an Optimizer object * **Pose2SLAMwSPCG**: solve a simple 3 by 3 grid of Pose2 SLAM problem by using easy SPCG interface diff --git a/gtsam/base/Group.h b/gtsam/base/Group.h index 39541416e..89fa8248c 100644 --- a/gtsam/base/Group.h +++ b/gtsam/base/Group.h @@ -95,7 +95,7 @@ template struct MultiplicativeGroupTraits { typedef group_tag structure_category; typedef multiplicative_group_tag group_flavor; - static Class Identity() { return Class::identity(); } + static 
Class Identity() { return Class::Identity(); } static Class Compose(const Class &g, const Class & h) { return g * h;} static Class Between(const Class &g, const Class & h) { return g.inverse() * h;} static Class Inverse(const Class &g) { return g.inverse();} @@ -111,7 +111,7 @@ template struct AdditiveGroupTraits { typedef group_tag structure_category; typedef additive_group_tag group_flavor; - static Class Identity() { return Class::identity(); } + static Class Identity() { return Class::Identity(); } static Class Compose(const Class &g, const Class & h) { return g + h;} static Class Between(const Class &g, const Class & h) { return h - g;} static Class Inverse(const Class &g) { return -g;} @@ -147,7 +147,7 @@ public: DirectProduct(const G& g, const H& h):std::pair(g,h) {} // identity - static DirectProduct identity() { return DirectProduct(); } + static DirectProduct Identity() { return DirectProduct(); } DirectProduct operator*(const DirectProduct& other) const { return DirectProduct(traits::Compose(this->first, other.first), @@ -181,7 +181,7 @@ public: DirectSum(const G& g, const H& h):std::pair(g,h) {} // identity - static DirectSum identity() { return DirectSum(); } + static DirectSum Identity() { return DirectSum(); } DirectSum operator+(const DirectSum& other) const { return DirectSum(g()+other.g(), h()+other.h()); diff --git a/gtsam/base/Lie.h b/gtsam/base/Lie.h index cb8e7d017..c4eba5deb 100644 --- a/gtsam/base/Lie.h +++ b/gtsam/base/Lie.h @@ -177,7 +177,7 @@ struct LieGroupTraits: GetDimensionImpl { /// @name Group /// @{ typedef multiplicative_group_tag group_flavor; - static Class Identity() { return Class::identity();} + static Class Identity() { return Class::Identity();} /// @} /// @name Manifold diff --git a/gtsam/base/ProductLieGroup.h b/gtsam/base/ProductLieGroup.h index a2dcf738e..6689c11d6 100644 --- a/gtsam/base/ProductLieGroup.h +++ b/gtsam/base/ProductLieGroup.h @@ -48,7 +48,7 @@ public: /// @name Group /// @{ typedef 
multiplicative_group_tag group_flavor; - static ProductLieGroup identity() {return ProductLieGroup();} + static ProductLieGroup Identity() {return ProductLieGroup();} ProductLieGroup operator*(const ProductLieGroup& other) const { return ProductLieGroup(traits::Compose(this->first,other.first), diff --git a/gtsam/base/Vector.h b/gtsam/base/Vector.h index 9cb2aa165..f7923ff88 100644 --- a/gtsam/base/Vector.h +++ b/gtsam/base/Vector.h @@ -60,6 +60,7 @@ GTSAM_MAKE_VECTOR_DEFS(9) GTSAM_MAKE_VECTOR_DEFS(10) GTSAM_MAKE_VECTOR_DEFS(11) GTSAM_MAKE_VECTOR_DEFS(12) +GTSAM_MAKE_VECTOR_DEFS(15) typedef Eigen::VectorBlock SubVector; typedef Eigen::VectorBlock ConstSubVector; diff --git a/gtsam/base/VectorSpace.h b/gtsam/base/VectorSpace.h index 43644b5c4..f7809f596 100644 --- a/gtsam/base/VectorSpace.h +++ b/gtsam/base/VectorSpace.h @@ -169,7 +169,7 @@ struct HasVectorSpacePrereqs { Vector v; BOOST_CONCEPT_USAGE(HasVectorSpacePrereqs) { - p = Class::identity(); // identity + p = Class::Identity(); // identity q = p + p; // addition q = p - p; // subtraction v = p.vector(); // conversion to vector @@ -192,7 +192,7 @@ struct VectorSpaceTraits: VectorSpaceImpl { /// @name Group /// @{ typedef additive_group_tag group_flavor; - static Class Identity() { return Class::identity();} + static Class Identity() { return Class::Identity();} /// @} /// @name Manifold diff --git a/gtsam/base/tests/testGroup.cpp b/gtsam/base/tests/testGroup.cpp index 976dee697..7e8cb7821 100644 --- a/gtsam/base/tests/testGroup.cpp +++ b/gtsam/base/tests/testGroup.cpp @@ -29,7 +29,7 @@ class Symmetric: private Eigen::PermutationMatrix { Eigen::PermutationMatrix(P) { } public: - static Symmetric identity() { return Symmetric(); } + static Symmetric Identity() { return Symmetric(); } Symmetric() { Eigen::PermutationMatrix::setIdentity(); } diff --git a/gtsam/basis/ParameterMatrix.h b/gtsam/basis/ParameterMatrix.h index eddcbfeae..3f830ec51 100644 --- a/gtsam/basis/ParameterMatrix.h +++ 
b/gtsam/basis/ParameterMatrix.h @@ -189,9 +189,9 @@ class ParameterMatrix { * NOTE: The size at compile time is unknown so this identity is zero * length and thus not valid. */ - inline static ParameterMatrix identity() { + inline static ParameterMatrix Identity() { // throw std::runtime_error( - // "ParameterMatrix::identity(): Don't use this function"); + // "ParameterMatrix::Identity(): Don't use this function"); return ParameterMatrix(0); } diff --git a/gtsam/basis/tests/testParameterMatrix.cpp b/gtsam/basis/tests/testParameterMatrix.cpp index ec62e8eea..ae2e8e111 100644 --- a/gtsam/basis/tests/testParameterMatrix.cpp +++ b/gtsam/basis/tests/testParameterMatrix.cpp @@ -133,7 +133,7 @@ TEST(ParameterMatrix, VectorSpace) { // vector EXPECT(assert_equal(Vector::Ones(M * N), params.vector())); // identity - EXPECT(assert_equal(ParameterMatrix::identity(), + EXPECT(assert_equal(ParameterMatrix::Identity(), ParameterMatrix(Matrix::Zero(M, 0)))); } diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i index b3a12a8d5..fa98f36fa 100644 --- a/gtsam/discrete/discrete.i +++ b/gtsam/discrete/discrete.i @@ -219,6 +219,16 @@ class DiscreteBayesTree { }; #include + +class DiscreteLookupTable : gtsam::DiscreteConditional{ + DiscreteLookupTable(size_t nFrontals, const gtsam::DiscreteKeys& keys, + const gtsam::DecisionTreeFactor::ADT& potentials); + void print( + const std::string& s = "Discrete Lookup Table: ", + const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; + size_t argmax(const gtsam::DiscreteValues& parentsValues) const; +}; + class DiscreteLookupDAG { DiscreteLookupDAG(); void push_back(const gtsam::DiscreteLookupTable* table); diff --git a/gtsam/geometry/Cyclic.h b/gtsam/geometry/Cyclic.h index 065cd0140..a503a6072 100644 --- a/gtsam/geometry/Cyclic.h +++ b/gtsam/geometry/Cyclic.h @@ -38,7 +38,7 @@ public: /// Default constructor yields identity Cyclic():i_(0) { } - static Cyclic identity() { return Cyclic();} + static Cyclic 
Identity() { return Cyclic();} /// Cast to size_t operator size_t() const { diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index c20e90819..e5a8eec7a 100644 --- a/gtsam/geometry/PinholeCamera.h +++ b/gtsam/geometry/PinholeCamera.h @@ -213,7 +213,7 @@ public: } /// for Canonical - static PinholeCamera identity() { + static PinholeCamera Identity() { return PinholeCamera(); // assumes that the default constructor is valid } diff --git a/gtsam/geometry/PinholePose.h b/gtsam/geometry/PinholePose.h index 7b92be5d5..ce393e45f 100644 --- a/gtsam/geometry/PinholePose.h +++ b/gtsam/geometry/PinholePose.h @@ -412,7 +412,7 @@ public: } /// for Canonical - static PinholePose identity() { + static PinholePose Identity() { return PinholePose(); // assumes that the default constructor is valid } diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index 466c5a42a..008e6525d 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -119,7 +119,7 @@ public: /// @{ /// identity for group operation - inline static Pose2 identity() { return Pose2(); } + inline static Pose2 Identity() { return Pose2(); } /// inverse GTSAM_EXPORT Pose2 inverse() const; diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 33fb55250..6dec7f74f 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -103,7 +103,7 @@ public: /// @{ /// identity for group operation - static Pose3 identity() { + static Pose3 Identity() { return Pose3(); } diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index 2690ca248..be8d11cda 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -107,8 +107,8 @@ namespace gtsam { /// @name Group /// @{ - /** identity */ - inline static Rot2 identity() { return Rot2(); } + /** Identity */ + inline static Rot2 Identity() { return Rot2(); } /** The inverse rotation - negative angle */ Rot2 inverse() const { return Rot2(c_, -s_);} diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index 
01ca7778c..800ce4cdf 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -297,7 +297,7 @@ class GTSAM_EXPORT Rot3 : public LieGroup { /// @{ /// identity rotation for group operation - inline static Rot3 identity() { + inline static Rot3 Identity() { return Rot3(); } diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index af0e7a3cf..7d1e6db7a 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -178,13 +178,13 @@ class SO : public LieGroup, internal::DimensionSO(N)> { /// SO identity for N >= 2 template > - static SO identity() { + static SO Identity() { return SO(); } /// SO identity for N == Eigen::Dynamic template > - static SO identity(size_t n = 0) { + static SO Identity(size_t n = 0) { return SO(n); } diff --git a/gtsam/geometry/Similarity2.cpp b/gtsam/geometry/Similarity2.cpp index 4ed3351f8..4b8cfd581 100644 --- a/gtsam/geometry/Similarity2.cpp +++ b/gtsam/geometry/Similarity2.cpp @@ -134,7 +134,7 @@ void Similarity2::print(const std::string& s) const { << std::endl; } -Similarity2 Similarity2::identity() { return Similarity2(); } +Similarity2 Similarity2::Identity() { return Similarity2(); } Similarity2 Similarity2::operator*(const Similarity2& S) const { return Similarity2(R_ * S.R_, ((1.0 / S.s_) * t_) + R_ * S.t_, s_ * S.s_); diff --git a/gtsam/geometry/Similarity2.h b/gtsam/geometry/Similarity2.h index 05f10d149..e72cda484 100644 --- a/gtsam/geometry/Similarity2.h +++ b/gtsam/geometry/Similarity2.h @@ -83,7 +83,7 @@ class GTSAM_EXPORT Similarity2 : public LieGroup { /// @{ /// Return an identity transform - static Similarity2 identity(); + static Similarity2 Identity(); /// Composition Similarity2 operator*(const Similarity2& S) const; diff --git a/gtsam/geometry/Similarity3.cpp b/gtsam/geometry/Similarity3.cpp index 7fde974c5..146161796 100644 --- a/gtsam/geometry/Similarity3.cpp +++ b/gtsam/geometry/Similarity3.cpp @@ -122,7 +122,7 @@ void Similarity3::print(const std::string& s) const { std::cout << "t: " << 
translation().transpose() << " s: " << scale() << std::endl; } -Similarity3 Similarity3::identity() { +Similarity3 Similarity3::Identity() { return Similarity3(); } Similarity3 Similarity3::operator*(const Similarity3& S) const { diff --git a/gtsam/geometry/Similarity3.h b/gtsam/geometry/Similarity3.h index 845d4c810..76d2bf536 100644 --- a/gtsam/geometry/Similarity3.h +++ b/gtsam/geometry/Similarity3.h @@ -84,7 +84,7 @@ class GTSAM_EXPORT Similarity3 : public LieGroup { /// @{ /// Return an identity transform - static Similarity3 identity(); + static Similarity3 Identity(); /// Composition Similarity3 operator*(const Similarity3& S) const; diff --git a/gtsam/geometry/SphericalCamera.h b/gtsam/geometry/SphericalCamera.h index 4880423d3..adcbe46a5 100644 --- a/gtsam/geometry/SphericalCamera.h +++ b/gtsam/geometry/SphericalCamera.h @@ -88,7 +88,7 @@ class GTSAM_EXPORT SphericalCamera { /// Default constructor SphericalCamera() - : pose_(Pose3::identity()), emptyCal_(boost::make_shared()) {} + : pose_(Pose3()), emptyCal_(boost::make_shared()) {} /// Constructor with pose explicit SphericalCamera(const Pose3& pose) @@ -198,9 +198,9 @@ class GTSAM_EXPORT SphericalCamera { } /// for Canonical - static SphericalCamera identity() { + static SphericalCamera Identity() { return SphericalCamera( - Pose3::identity()); // assumes that the default constructor is valid + Pose3::Identity()); // assumes that the default constructor is valid } /// for Linear Triangulation diff --git a/gtsam/geometry/StereoPoint2.h b/gtsam/geometry/StereoPoint2.h index 9eef01577..8c9197fd2 100644 --- a/gtsam/geometry/StereoPoint2.h +++ b/gtsam/geometry/StereoPoint2.h @@ -71,7 +71,7 @@ public: /// @{ /// identity - inline static StereoPoint2 identity() { + inline static StereoPoint2 Identity() { return StereoPoint2(); } diff --git a/gtsam/geometry/geometry.i b/gtsam/geometry/geometry.i index 1b1169d2f..988727b98 100644 --- a/gtsam/geometry/geometry.i +++ b/gtsam/geometry/geometry.i @@ -16,7 +16,7 @@ 
class Point2 { bool equals(const gtsam::Point2& point, double tol) const; // Group - static gtsam::Point2 identity(); + static gtsam::Point2 Identity(); // Standard Interface double x() const; @@ -73,7 +73,7 @@ class StereoPoint2 { bool equals(const gtsam::StereoPoint2& point, double tol) const; // Group - static gtsam::StereoPoint2 identity(); + static gtsam::StereoPoint2 Identity(); gtsam::StereoPoint2 inverse() const; gtsam::StereoPoint2 compose(const gtsam::StereoPoint2& p2) const; gtsam::StereoPoint2 between(const gtsam::StereoPoint2& p2) const; @@ -115,7 +115,7 @@ class Point3 { bool equals(const gtsam::Point3& p, double tol) const; // Group - static gtsam::Point3 identity(); + static gtsam::Point3 Identity(); // Standard Interface Vector vector() const; @@ -149,7 +149,7 @@ class Rot2 { bool equals(const gtsam::Rot2& rot, double tol) const; // Group - static gtsam::Rot2 identity(); + static gtsam::Rot2 Identity(); gtsam::Rot2 inverse(); gtsam::Rot2 compose(const gtsam::Rot2& p2) const; gtsam::Rot2 between(const gtsam::Rot2& p2) const; @@ -198,7 +198,7 @@ class SO3 { bool equals(const gtsam::SO3& other, double tol) const; // Group - static gtsam::SO3 identity(); + static gtsam::SO3 Identity(); gtsam::SO3 inverse() const; gtsam::SO3 between(const gtsam::SO3& R) const; gtsam::SO3 compose(const gtsam::SO3& R) const; @@ -228,7 +228,7 @@ class SO4 { bool equals(const gtsam::SO4& other, double tol) const; // Group - static gtsam::SO4 identity(); + static gtsam::SO4 Identity(); gtsam::SO4 inverse() const; gtsam::SO4 between(const gtsam::SO4& Q) const; gtsam::SO4 compose(const gtsam::SO4& Q) const; @@ -258,7 +258,7 @@ class SOn { bool equals(const gtsam::SOn& other, double tol) const; // Group - static gtsam::SOn identity(); + static gtsam::SOn Identity(); gtsam::SOn inverse() const; gtsam::SOn between(const gtsam::SOn& Q) const; gtsam::SOn compose(const gtsam::SOn& Q) const; @@ -322,7 +322,7 @@ class Rot3 { bool equals(const gtsam::Rot3& rot, double tol) const; // 
Group - static gtsam::Rot3 identity(); + static gtsam::Rot3 Identity(); gtsam::Rot3 inverse() const; gtsam::Rot3 compose(const gtsam::Rot3& p2) const; gtsam::Rot3 between(const gtsam::Rot3& p2) const; @@ -380,7 +380,7 @@ class Pose2 { bool equals(const gtsam::Pose2& pose, double tol) const; // Group - static gtsam::Pose2 identity(); + static gtsam::Pose2 Identity(); gtsam::Pose2 inverse() const; gtsam::Pose2 compose(const gtsam::Pose2& p2) const; gtsam::Pose2 between(const gtsam::Pose2& p2) const; @@ -444,7 +444,7 @@ class Pose3 { bool equals(const gtsam::Pose3& pose, double tol) const; // Group - static gtsam::Pose3 identity(); + static gtsam::Pose3 Identity(); gtsam::Pose3 inverse() const; gtsam::Pose3 inverse(Eigen::Ref H) const; gtsam::Pose3 compose(const gtsam::Pose3& pose) const; diff --git a/gtsam/geometry/tests/testPose2.cpp b/gtsam/geometry/tests/testPose2.cpp index de779cc75..88c00a2e9 100644 --- a/gtsam/geometry/tests/testPose2.cpp +++ b/gtsam/geometry/tests/testPose2.cpp @@ -902,7 +902,7 @@ TEST(Pose2 , TransformCovariance3) { /* ************************************************************************* */ TEST(Pose2, Print) { - Pose2 pose(Rot2::identity(), Point2(1, 2)); + Pose2 pose(Rot2::Identity(), Point2(1, 2)); // Generate the expected output string s = "Planar Pose"; diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index e1d3d5ea2..78a4af799 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -1133,7 +1133,7 @@ Pose3 testing_interpolate(const Pose3& t1, const Pose3& t2, double gamma) { retu TEST(Pose3, interpolateJacobians) { { - Pose3 X = Pose3::identity(); + Pose3 X = Pose3::Identity(); Pose3 Y(Rot3::Rz(M_PI_2), Point3(1, 0, 0)); double t = 0.5; Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0.5, -0.207107, 0)); // note: different from test above: this is full Pose3 interpolation @@ -1147,10 +1147,10 @@ TEST(Pose3, interpolateJacobians) { 
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6)); } { - Pose3 X = Pose3::identity(); - Pose3 Y(Rot3::identity(), Point3(1, 0, 0)); + Pose3 X = Pose3::Identity(); + Pose3 Y(Rot3::Identity(), Point3(1, 0, 0)); double t = 0.3; - Pose3 expectedPoseInterp(Rot3::identity(), Point3(0.3, 0, 0)); + Pose3 expectedPoseInterp(Rot3::Identity(), Point3(0.3, 0, 0)); Matrix actualJacobianX, actualJacobianY; EXPECT(assert_equal(expectedPoseInterp, interpolate(X, Y, t, actualJacobianX, actualJacobianY), 1e-5)); @@ -1161,7 +1161,7 @@ TEST(Pose3, interpolateJacobians) { EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6)); } { - Pose3 X = Pose3::identity(); + Pose3 X = Pose3::Identity(); Pose3 Y(Rot3::Rz(M_PI_2), Point3(0, 0, 0)); double t = 0.5; Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0, 0, 0)); @@ -1204,7 +1204,7 @@ TEST(Pose3, Create) { /* ************************************************************************* */ TEST(Pose3, Print) { - Pose3 pose(Rot3::identity(), Point3(1, 2, 3)); + Pose3 pose(Rot3::Identity(), Point3(1, 2, 3)); // Generate the expected output std::string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 1 2 3\n"; diff --git a/gtsam/geometry/tests/testSimilarity3.cpp b/gtsam/geometry/tests/testSimilarity3.cpp index 7a134f6ef..fad24f743 100644 --- a/gtsam/geometry/tests/testSimilarity3.cpp +++ b/gtsam/geometry/tests/testSimilarity3.cpp @@ -203,11 +203,11 @@ TEST(Similarity3, ExpLogMap) { Vector7 zeros; zeros << 0, 0, 0, 0, 0, 0, 0; - Vector7 logIdentity = Similarity3::Logmap(Similarity3::identity()); + Vector7 logIdentity = Similarity3::Logmap(Similarity3::Identity()); EXPECT(assert_equal(zeros, logIdentity)); Similarity3 expZero = Similarity3::Expmap(zeros); - Similarity3 ident = Similarity3::identity(); + Similarity3 ident = Similarity3::Identity(); EXPECT(assert_equal(expZero, ident)); // Compare to matrix exponential, using expm in Lie.h @@ -391,7 +391,7 @@ TEST(Similarity3, Optimization) { NonlinearFactorGraph graph; 
graph.addPrior(key, prior, model); - // Create initial estimate with identity transform + // Create initial estimate with Identity transform Values initial; initial.insert(key, Similarity3()); diff --git a/gtsam/hybrid/GaussianMixture.cpp b/gtsam/hybrid/GaussianMixture.cpp index b04800d21..6816dfbf6 100644 --- a/gtsam/hybrid/GaussianMixture.cpp +++ b/gtsam/hybrid/GaussianMixture.cpp @@ -119,11 +119,36 @@ void GaussianMixture::print(const std::string &s, "", [&](Key k) { return formatter(k); }, [&](const GaussianConditional::shared_ptr &gf) -> std::string { RedirectCout rd; - if (!gf->empty()) + if (gf && !gf->empty()) gf->print("", formatter); else return {"nullptr"}; return rd.str(); }); } + +/* *******************************************************************************/ +void GaussianMixture::prune(const DecisionTreeFactor &decisionTree) { + // Functional which loops over all assignments and create a set of + // GaussianConditionals + auto pruner = [&decisionTree]( + const Assignment &choices, + const GaussianConditional::shared_ptr &conditional) + -> GaussianConditional::shared_ptr { + // typecast so we can use this to get probability value + DiscreteValues values(choices); + + if (decisionTree(values) == 0.0) { + // empty aka null pointer + boost::shared_ptr null; + return null; + } else { + return conditional; + } + }; + + auto pruned_conditionals = conditionals_.apply(pruner); + conditionals_.root_ = pruned_conditionals.root_; +} + } // namespace gtsam diff --git a/gtsam/hybrid/GaussianMixture.h b/gtsam/hybrid/GaussianMixture.h index fc1eb0f06..75deb4d55 100644 --- a/gtsam/hybrid/GaussianMixture.h +++ b/gtsam/hybrid/GaussianMixture.h @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -30,11 +31,14 @@ namespace gtsam { /** * @brief A conditional of gaussian mixtures indexed by discrete variables, as - * part of a Bayes Network. + * part of a Bayes Network. 
This is the result of the elimination of a + * continuous variable in a hybrid scheme, such that the remaining variables are + * discrete+continuous. * - * Represents the conditional density P(X | M, Z) where X is a continuous random - * variable, M is the selection of discrete variables corresponding to a subset - * of the Gaussian variables and Z is parent of this node + * Represents the conditional density P(X | M, Z) where X is the set of + * continuous random variables, M is the selection of discrete variables + * corresponding to a subset of the Gaussian variables and Z is parent of this + * node . * * The probability P(x|y,z,...) is proportional to * \f$ \sum_i k_i \exp - \frac{1}{2} |R_i x - (d_i - S_i y - T_i z - ...)|^2 \f$ @@ -118,7 +122,7 @@ class GTSAM_EXPORT GaussianMixture /// Test equality with base HybridFactor bool equals(const HybridFactor &lf, double tol = 1e-9) const override; - /* print utility */ + /// Print utility void print( const std::string &s = "GaussianMixture\n", const KeyFormatter &formatter = DefaultKeyFormatter) const override; @@ -128,6 +132,15 @@ class GTSAM_EXPORT GaussianMixture /// Getter for the underlying Conditionals DecisionTree const Conditionals &conditionals(); + /** + * @brief Prune the decision tree of Gaussian factors as per the discrete + * `decisionTree`. + * + * @param decisionTree A pruned decision tree of discrete keys where the + * leaves are probabilities. + */ + void prune(const DecisionTreeFactor &decisionTree); + /** * @brief Merge the Gaussian Factor Graphs in `this` and `sum` while * maintaining the decision tree structure. 
diff --git a/gtsam/hybrid/GaussianMixtureFactor.cpp b/gtsam/hybrid/GaussianMixtureFactor.cpp index 8f832d8ea..9b5be188a 100644 --- a/gtsam/hybrid/GaussianMixtureFactor.cpp +++ b/gtsam/hybrid/GaussianMixtureFactor.cpp @@ -51,7 +51,7 @@ GaussianMixtureFactor GaussianMixtureFactor::FromFactors( void GaussianMixtureFactor::print(const std::string &s, const KeyFormatter &formatter) const { HybridFactor::print(s, formatter); - std::cout << "]{\n"; + std::cout << "{\n"; factors_.print( "", [&](Key k) { return formatter(k); }, [&](const GaussianFactor::shared_ptr &gf) -> std::string { diff --git a/gtsam/hybrid/GaussianMixtureFactor.h b/gtsam/hybrid/GaussianMixtureFactor.h index 7b7c1899f..a0a51af55 100644 --- a/gtsam/hybrid/GaussianMixtureFactor.h +++ b/gtsam/hybrid/GaussianMixtureFactor.h @@ -22,7 +22,7 @@ #include #include -#include +#include #include namespace gtsam { diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index 816450ff3..4665a3136 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -21,6 +21,95 @@ namespace gtsam { +/* ************************************************************************* */ +/// Return the DiscreteKey vector as a set. +static std::set DiscreteKeysAsSet(const DiscreteKeys &dkeys) { + std::set s; + s.insert(dkeys.begin(), dkeys.end()); + return s; +} + +/* ************************************************************************* */ +HybridBayesNet HybridBayesNet::prune( + const DecisionTreeFactor::shared_ptr &discreteFactor) const { + /* To Prune, we visitWith every leaf in the GaussianMixture. + * For each leaf, using the assignment we can check the discrete decision tree + * for 0.0 probability, then just set the leaf to a nullptr. + * + * We can later check the GaussianMixture for just nullptrs. 
+ */ + + HybridBayesNet prunedBayesNetFragment; + + // Functional which loops over all assignments and create a set of + // GaussianConditionals + auto pruner = [&](const Assignment &choices, + const GaussianConditional::shared_ptr &conditional) + -> GaussianConditional::shared_ptr { + // typecast so we can use this to get probability value + DiscreteValues values(choices); + + if ((*discreteFactor)(values) == 0.0) { + // empty aka null pointer + boost::shared_ptr null; + return null; + } else { + return conditional; + } + }; + + // Go through all the conditionals in the + // Bayes Net and prune them as per discreteFactor. + for (size_t i = 0; i < this->size(); i++) { + HybridConditional::shared_ptr conditional = this->at(i); + + GaussianMixture::shared_ptr gaussianMixture = + boost::dynamic_pointer_cast(conditional->inner()); + + if (gaussianMixture) { + // We may have mixtures with less discrete keys than discreteFactor so we + // skip those since the label assignment does not exist. + auto gmKeySet = DiscreteKeysAsSet(gaussianMixture->discreteKeys()); + auto dfKeySet = DiscreteKeysAsSet(discreteFactor->discreteKeys()); + if (gmKeySet != dfKeySet) { + // Add the gaussianMixture which doesn't have to be pruned. + prunedBayesNetFragment.push_back( + boost::make_shared(gaussianMixture)); + continue; + } + + // Run the pruning to get a new, pruned tree + GaussianMixture::Conditionals prunedTree = + gaussianMixture->conditionals().apply(pruner); + + DiscreteKeys discreteKeys = gaussianMixture->discreteKeys(); + // reverse keys to get a natural ordering + std::reverse(discreteKeys.begin(), discreteKeys.end()); + + // Convert from boost::iterator_range to KeyVector + // so we can pass it to constructor. + KeyVector frontals(gaussianMixture->frontals().begin(), + gaussianMixture->frontals().end()), + parents(gaussianMixture->parents().begin(), + gaussianMixture->parents().end()); + + // Create the new gaussian mixture and add it to the bayes net. 
+ auto prunedGaussianMixture = boost::make_shared( + frontals, parents, discreteKeys, prunedTree); + + // Type-erase and add to the pruned Bayes Net fragment. + prunedBayesNetFragment.push_back( + boost::make_shared(prunedGaussianMixture)); + + } else { + // Add the non-GaussianMixture conditional + prunedBayesNetFragment.push_back(conditional); + } + } + + return prunedBayesNetFragment; +} + /* ************************************************************************* */ GaussianMixture::shared_ptr HybridBayesNet::atGaussian(size_t i) const { return boost::dynamic_pointer_cast(factors_.at(i)->inner()); diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index 9d6d5f236..0d2dc3642 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -17,6 +17,7 @@ #pragma once +#include #include #include #include @@ -39,6 +40,10 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { /** Construct empty bayes net */ HybridBayesNet() = default; + /// Prune the Hybrid Bayes Net given the discrete decision tree. + HybridBayesNet prune( + const DecisionTreeFactor::shared_ptr &discreteFactor) const; + /// Add HybridConditional to Bayes Net using Base::add; diff --git a/gtsam/hybrid/HybridBayesTree.h b/gtsam/hybrid/HybridBayesTree.h index 0b89ca8c4..02a4a11e5 100644 --- a/gtsam/hybrid/HybridBayesTree.h +++ b/gtsam/hybrid/HybridBayesTree.h @@ -76,7 +76,10 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree { /** * @brief Class for Hybrid Bayes tree orphan subtrees. * - * This does special stuff for the hybrid case + * This object stores parent keys in our base type factor so that + * eliminating those parent keys will pull this subtree into the + * elimination. + * This does special stuff for the hybrid case. 
* * @tparam CLIQUE */ diff --git a/gtsam/hybrid/HybridFactor.cpp b/gtsam/hybrid/HybridFactor.cpp index 8df2d524f..a9fe62cf1 100644 --- a/gtsam/hybrid/HybridFactor.cpp +++ b/gtsam/hybrid/HybridFactor.cpp @@ -78,7 +78,8 @@ bool HybridFactor::equals(const HybridFactor &lf, double tol) const { const This *e = dynamic_cast(&lf); return e != nullptr && Base::equals(*e, tol) && isDiscrete_ == e->isDiscrete_ && isContinuous_ == e->isContinuous_ && - isHybrid_ == e->isHybrid_ && nrContinuous_ == e->nrContinuous_; + isHybrid_ == e->isHybrid_ && continuousKeys_ == e->continuousKeys_ && + discreteKeys_ == e->discreteKeys_; } /* ************************************************************************ */ @@ -88,17 +89,23 @@ void HybridFactor::print(const std::string &s, if (isContinuous_) std::cout << "Continuous "; if (isDiscrete_) std::cout << "Discrete "; if (isHybrid_) std::cout << "Hybrid "; - for (size_t c=0; c 0 ? "; " : ""); } } - for(auto && discreteKey: discreteKeys_) { - std::cout << formatter(discreteKey.first) << " "; + for (size_t d = 0; d < discreteKeys_.size(); d++) { + std::cout << formatter(discreteKeys_.at(d).first); + if (d < discreteKeys_.size() - 1) { + std::cout << " "; + } + } + std::cout << "]"; } } // namespace gtsam diff --git a/gtsam/hybrid/HybridFactor.h b/gtsam/hybrid/HybridFactor.h index 3e91e2487..13dc2e6e6 100644 --- a/gtsam/hybrid/HybridFactor.h +++ b/gtsam/hybrid/HybridFactor.h @@ -50,8 +50,8 @@ class GTSAM_EXPORT HybridFactor : public Factor { size_t nrContinuous_ = 0; protected: + // Set of DiscreteKeys for this factor. DiscreteKeys discreteKeys_; - /// Record continuous keys for book-keeping KeyVector continuousKeys_; @@ -120,10 +120,13 @@ class GTSAM_EXPORT HybridFactor : public Factor { bool isHybrid() const { return isHybrid_; } /// Return the number of continuous variables in this factor. 
- size_t nrContinuous() const { return nrContinuous_; } + size_t nrContinuous() const { return continuousKeys_.size(); } - /// Return vector of discrete keys. - DiscreteKeys discreteKeys() const { return discreteKeys_; } + /// Return the discrete keys for this factor. + const DiscreteKeys &discreteKeys() const { return discreteKeys_; } + + /// Return only the continuous keys for this factor. + const KeyVector &continuousKeys() const { return continuousKeys_; } /// @} }; diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index 2a92c717c..e645e63e5 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -37,6 +37,8 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { using This = HybridGaussianFactor; using shared_ptr = boost::shared_ptr; + HybridGaussianFactor() = default; + // Explicit conversion from a shared ptr of GF explicit HybridGaussianFactor(GaussianFactor::shared_ptr other); @@ -52,7 +54,7 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// GTSAM print utility. void print( - const std::string &s = "HybridFactor\n", + const std::string &s = "HybridGaussianFactor\n", const KeyFormatter &formatter = DefaultKeyFormatter) const override; /// @} diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 88730cae9..c024c1255 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -98,6 +98,12 @@ GaussianMixtureFactor::Sum sumFrontals( } else if (f->isContinuous()) { deferredFactors.push_back( boost::dynamic_pointer_cast(f)->inner()); + + } else if (f->isDiscrete()) { + // Don't do anything for discrete-only factors + // since we want to eliminate continuous values only. + continue; + } else { // We need to handle the case where the object is actually an // BayesTreeOrphanWrapper! 
@@ -106,8 +112,8 @@ GaussianMixtureFactor::Sum sumFrontals( if (!orphan) { auto &fr = *f; throw std::invalid_argument( - std::string("factor is discrete in continuous elimination") + - typeid(fr).name()); + std::string("factor is discrete in continuous elimination ") + + demangle(typeid(fr).name())); } } } @@ -158,7 +164,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, } } - auto result = EliminateDiscrete(dfg, frontalKeys); + auto result = EliminateForMPE(dfg, frontalKeys); return {boost::make_shared(result.first), boost::make_shared(result.second)}; @@ -178,6 +184,19 @@ hybridElimination(const HybridGaussianFactorGraph &factors, // sum out frontals, this is the factor on the separator GaussianMixtureFactor::Sum sum = sumFrontals(factors); + // If a tree leaf contains nullptr, + // convert that leaf to an empty GaussianFactorGraph. + // Needed since the DecisionTree will otherwise create + // a GFG with a single (null) factor. + auto emptyGaussian = [](const GaussianFactorGraph &gfg) { + bool hasNull = + std::any_of(gfg.begin(), gfg.end(), + [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }); + + return hasNull ? GaussianFactorGraph() : gfg; + }; + sum = GaussianMixtureFactor::Sum(sum, emptyGaussian); + using EliminationPair = GaussianFactorGraph::EliminationResult; KeyVector keysOfEliminated; // Not the ordering @@ -189,7 +208,10 @@ hybridElimination(const HybridGaussianFactorGraph &factors, if (graph.empty()) { return {nullptr, nullptr}; } - auto result = EliminatePreferCholesky(graph, frontalKeys); + std::pair, + boost::shared_ptr> + result = EliminatePreferCholesky(graph, frontalKeys); + if (keysOfEliminated.empty()) { keysOfEliminated = result.first->keys(); // Initialize the keysOfEliminated to be the @@ -229,14 +251,27 @@ hybridElimination(const HybridGaussianFactorGraph &factors, boost::make_shared(discreteFactor)}; } else { - // Create a resulting DCGaussianMixture on the separator. 
+ // Create a resulting GaussianMixtureFactor on the separator. auto factor = boost::make_shared( KeyVector(continuousSeparator.begin(), continuousSeparator.end()), discreteSeparator, separatorFactors); return {boost::make_shared(conditional), factor}; } } -/* ************************************************************************ */ +/* ************************************************************************ + * Function to eliminate variables **under the following assumptions**: + * 1. When the ordering is fully continuous, and the graph only contains + * continuous and hybrid factors + * 2. When the ordering is fully discrete, and the graph only contains discrete + * factors + * + * Any usage outside of this is considered incorrect. + * + * \warning This function is not meant to be used with arbitrary hybrid factor + * graphs. For example, if there exists continuous parents, and one tries to + * eliminate a discrete variable (as specified in the ordering), the result will + * be INCORRECT and there will be NO error raised. 
+ */ std::pair // EliminateHybrid(const HybridGaussianFactorGraph &factors, const Ordering &frontalKeys) { diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index e1cd2dc5f..936710330 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -24,6 +24,7 @@ #include #include #include +#include namespace gtsam { diff --git a/gtsam/hybrid/HybridGaussianISAM.cpp b/gtsam/hybrid/HybridGaussianISAM.cpp index 7783a88dd..23a95c021 100644 --- a/gtsam/hybrid/HybridGaussianISAM.cpp +++ b/gtsam/hybrid/HybridGaussianISAM.cpp @@ -41,6 +41,7 @@ HybridGaussianISAM::HybridGaussianISAM(const HybridBayesTree& bayesTree) void HybridGaussianISAM::updateInternal( const HybridGaussianFactorGraph& newFactors, HybridBayesTree::Cliques* orphans, + const boost::optional& ordering, const HybridBayesTree::Eliminate& function) { // Remove the contaminated part of the Bayes tree BayesNetType bn; @@ -74,16 +75,21 @@ void HybridGaussianISAM::updateInternal( std::copy(allDiscrete.begin(), allDiscrete.end(), std::back_inserter(newKeysDiscreteLast)); - // KeyVector new - // Get an ordering where the new keys are eliminated last const VariableIndex index(factors); - const Ordering ordering = Ordering::ColamdConstrainedLast( - index, KeyVector(newKeysDiscreteLast.begin(), newKeysDiscreteLast.end()), - true); + Ordering elimination_ordering; + if (ordering) { + elimination_ordering = *ordering; + } else { + elimination_ordering = Ordering::ColamdConstrainedLast( + index, + KeyVector(newKeysDiscreteLast.begin(), newKeysDiscreteLast.end()), + true); + } // eliminate all factors (top, added, orphans) into a new Bayes tree - auto bayesTree = factors.eliminateMultifrontal(ordering, function, index); + HybridBayesTree::shared_ptr bayesTree = + factors.eliminateMultifrontal(elimination_ordering, function, index); // Re-add into Bayes tree data structures this->roots_.insert(this->roots_.end(), 
bayesTree->roots().begin(), @@ -93,9 +99,61 @@ void HybridGaussianISAM::updateInternal( /* ************************************************************************* */ void HybridGaussianISAM::update(const HybridGaussianFactorGraph& newFactors, + const boost::optional& ordering, const HybridBayesTree::Eliminate& function) { Cliques orphans; - this->updateInternal(newFactors, &orphans, function); + this->updateInternal(newFactors, &orphans, ordering, function); +} + +/* ************************************************************************* */ +/** + * @brief Check if `b` is a subset of `a`. + * Non-const since they need to be sorted. + * + * @param a KeyVector + * @param b KeyVector + * @return True if the keys of b is a subset of a, else false. + */ +bool IsSubset(KeyVector a, KeyVector b) { + std::sort(a.begin(), a.end()); + std::sort(b.begin(), b.end()); + return std::includes(a.begin(), a.end(), b.begin(), b.end()); +} + +/* ************************************************************************* */ +void HybridGaussianISAM::prune(const Key& root, const size_t maxNrLeaves) { + auto decisionTree = boost::dynamic_pointer_cast( + this->clique(root)->conditional()->inner()); + DecisionTreeFactor prunedDiscreteFactor = decisionTree->prune(maxNrLeaves); + decisionTree->root_ = prunedDiscreteFactor.root_; + + std::vector prunedKeys; + for (auto&& clique : nodes()) { + // The cliques can be repeated for each frontal so we record it in + // prunedKeys and check if we have already pruned a particular clique. 
+ if (std::find(prunedKeys.begin(), prunedKeys.end(), clique.first) != + prunedKeys.end()) { + continue; + } + + // Add all the keys of the current clique to be pruned to prunedKeys + for (auto&& key : clique.second->conditional()->frontals()) { + prunedKeys.push_back(key); + } + + // Convert parents() to a KeyVector for comparison + KeyVector parents; + for (auto&& parent : clique.second->conditional()->parents()) { + parents.push_back(parent); + } + + if (IsSubset(parents, decisionTree->keys())) { + auto gaussianMixture = boost::dynamic_pointer_cast( + clique.second->conditional()->inner()); + + gaussianMixture->prune(prunedDiscreteFactor); + } + } } } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianISAM.h b/gtsam/hybrid/HybridGaussianISAM.h index d5b6271da..4fc206582 100644 --- a/gtsam/hybrid/HybridGaussianISAM.h +++ b/gtsam/hybrid/HybridGaussianISAM.h @@ -48,6 +48,7 @@ class GTSAM_EXPORT HybridGaussianISAM : public ISAM { void updateInternal( const HybridGaussianFactorGraph& newFactors, HybridBayesTree::Cliques* orphans, + const boost::optional& ordering = boost::none, const HybridBayesTree::Eliminate& function = HybridBayesTree::EliminationTraitsType::DefaultEliminate); @@ -59,8 +60,17 @@ class GTSAM_EXPORT HybridGaussianISAM : public ISAM { * @param function Elimination function. */ void update(const HybridGaussianFactorGraph& newFactors, + const boost::optional& ordering = boost::none, const HybridBayesTree::Eliminate& function = HybridBayesTree::EliminationTraitsType::DefaultEliminate); + + /** + * @brief + * + * @param root The root key in the discrete conditional decision tree. 
+ * @param maxNumberLeaves + */ + void prune(const Key& root, const size_t maxNumberLeaves); }; /// traits diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp new file mode 100644 index 000000000..5a1833d39 --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactor.cpp @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactor.cpp + * @date May 28, 2022 + * @author Varun Agrawal + */ + +#include + +#include + +namespace gtsam { + +/* ************************************************************************* */ +HybridNonlinearFactor::HybridNonlinearFactor( + const NonlinearFactor::shared_ptr &other) + : Base(other->keys()), inner_(other) {} + +/* ************************************************************************* */ +bool HybridNonlinearFactor::equals(const HybridFactor &lf, double tol) const { + return Base::equals(lf, tol); +} + +/* ************************************************************************* */ +void HybridNonlinearFactor::print(const std::string &s, + const KeyFormatter &formatter) const { + HybridFactor::print(s, formatter); + inner_->print("\n", formatter); +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactor.h b/gtsam/hybrid/HybridNonlinearFactor.h new file mode 100644 index 000000000..7776347b3 --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactor.h @@ -0,0 +1,62 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank 
Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactor.h + * @date May 28, 2022 + * @author Varun Agrawal + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * A HybridNonlinearFactor is a layer over NonlinearFactor so that we do not + * have a diamond inheritance. + */ +class HybridNonlinearFactor : public HybridFactor { + private: + NonlinearFactor::shared_ptr inner_; + + public: + using Base = HybridFactor; + using This = HybridNonlinearFactor; + using shared_ptr = boost::shared_ptr; + + // Explicit conversion from a shared ptr of NonlinearFactor + explicit HybridNonlinearFactor(const NonlinearFactor::shared_ptr &other); + + /// @name Testable + /// @{ + + /// Check equality. + virtual bool equals(const HybridFactor &lf, double tol) const override; + + /// GTSAM print utility. + void print( + const std::string &s = "HybridNonlinearFactor\n", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; + + /// @} + + NonlinearFactor::shared_ptr inner() const { return inner_; } + + /// Linearize to a HybridGaussianFactor at the linearization point `c`. + boost::shared_ptr linearize(const Values &c) const { + return boost::make_shared(inner_->linearize(c)); + } +}; +} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp new file mode 100644 index 000000000..a6abce15a --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -0,0 +1,94 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactorGraph.cpp + * @brief Nonlinear hybrid factor graph that uses type erasure + * @author Varun Agrawal + * @date May 28, 2022 + */ + +#include + +namespace gtsam { + +/* ************************************************************************* */ +void HybridNonlinearFactorGraph::add( + boost::shared_ptr factor) { + FactorGraph::add(boost::make_shared(factor)); +} + +/* ************************************************************************* */ +void HybridNonlinearFactorGraph::print(const std::string& s, + const KeyFormatter& keyFormatter) const { + // Base::print(str, keyFormatter); + std::cout << (s.empty() ? "" : s + " ") << std::endl; + std::cout << "size: " << size() << std::endl; + for (size_t i = 0; i < factors_.size(); i++) { + std::stringstream ss; + ss << "factor " << i << ": "; + if (factors_[i]) { + factors_[i]->print(ss.str(), keyFormatter); + std::cout << std::endl; + } + } +} + +/* ************************************************************************* */ +HybridGaussianFactorGraph HybridNonlinearFactorGraph::linearize( + const Values& continuousValues) const { + // create an empty linear FG + HybridGaussianFactorGraph linearFG; + + linearFG.reserve(size()); + + // linearize all hybrid factors + for (auto&& factor : factors_) { + // First check if it is a valid factor + if (factor) { + // Check if the factor is a hybrid factor. + // It can be either a nonlinear MixtureFactor or a linear + // GaussianMixtureFactor. + if (factor->isHybrid()) { + // Check if it is a nonlinear mixture factor + if (auto nlmf = boost::dynamic_pointer_cast(factor)) { + linearFG.push_back(nlmf->linearize(continuousValues)); + } else { + linearFG.push_back(factor); + } + + // Now check if the factor is a continuous only factor. 
+ } else if (factor->isContinuous()) { + // In this case, we check if factor->inner() is nonlinear since + // HybridFactors wrap over continuous factors. + auto nlhf = boost::dynamic_pointer_cast(factor); + if (auto nlf = + boost::dynamic_pointer_cast(nlhf->inner())) { + auto hgf = boost::make_shared( + nlf->linearize(continuousValues)); + linearFG.push_back(hgf); + } else { + linearFG.push_back(factor); + } + // Finally if nothing else, we are discrete-only which doesn't need + // linearization. + } else { + linearFG.push_back(factor); + } + + } else { + linearFG.push_back(GaussianFactor::shared_ptr()); + } + } + return linearFG; +} + +} // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h new file mode 100644 index 000000000..2ddb8bcea --- /dev/null +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -0,0 +1,134 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HybridNonlinearFactorGraph.h + * @brief Nonlinear hybrid factor graph that uses type erasure + * @author Varun Agrawal + * @date May 28, 2022 + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +namespace gtsam { + +/** + * Nonlinear Hybrid Factor Graph + * ----------------------- + * This is the non-linear version of a hybrid factor graph. + * Everything inside needs to be hybrid factor or hybrid conditional. + */ +class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { + protected: + /// Check if FACTOR type is derived from NonlinearFactor.
+ template + using IsNonlinear = typename std::enable_if< + std::is_base_of::value>::type; + + public: + using Base = HybridFactorGraph; + using This = HybridNonlinearFactorGraph; ///< this class + using shared_ptr = boost::shared_ptr; ///< shared_ptr to This + + using Values = gtsam::Values; ///< backwards compatibility + using Indices = KeyVector; ///> map from keys to values + + /// @name Constructors + /// @{ + + HybridNonlinearFactorGraph() = default; + + /** + * Implicit copy/downcast constructor to override explicit template container + * constructor. In BayesTree this is used for: + * `cachedSeparatorMarginal_.reset(*separatorMarginal)` + * */ + template + HybridNonlinearFactorGraph(const FactorGraph& graph) + : Base(graph) {} + + /// @} + + // Allow use of selected FactorGraph methods: + using Base::empty; + using Base::reserve; + using Base::size; + using Base::operator[]; + using Base::add; + using Base::resize; + + /** + * Add a nonlinear factor *pointer* to the internal nonlinear factor graph + * @param nonlinearFactor - boost::shared_ptr to the factor to add + */ + template + IsNonlinear push_nonlinear( + const boost::shared_ptr& nonlinearFactor) { + Base::push_back(boost::make_shared(nonlinearFactor)); + } + + /// Construct a factor and add (shared pointer to it) to factor graph. + template + IsNonlinear emplace_nonlinear(Args&&... args) { + auto factor = boost::allocate_shared( + Eigen::aligned_allocator(), std::forward(args)...); + push_nonlinear(factor); + } + + /** + * @brief Add a single factor shared pointer to the hybrid factor graph. + * Dynamically handles the factor type and assigns it to the correct + * underlying container. + * + * @tparam FACTOR The factor type template + * @param sharedFactor The factor to add to this factor graph. 
+ */ + template + void push_back(const boost::shared_ptr& sharedFactor) { + if (auto p = boost::dynamic_pointer_cast(sharedFactor)) { + push_nonlinear(p); + } else { + Base::push_back(sharedFactor); + } + } + + /// Add a nonlinear factor as a shared ptr. + void add(boost::shared_ptr factor); + + /// Print the factor graph. + void print( + const std::string& s = "HybridNonlinearFactorGraph", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; + + /** + * @brief Linearize all the continuous factors in the + * HybridNonlinearFactorGraph. + * + * @param continuousValues: Dictionary of continuous values. + * @return HybridGaussianFactorGraph + */ + HybridGaussianFactorGraph linearize(const Values& continuousValues) const; +}; + +template <> +struct traits + : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/hybrid/MixtureFactor.h b/gtsam/hybrid/MixtureFactor.h new file mode 100644 index 000000000..3cd21e32e --- /dev/null +++ b/gtsam/hybrid/MixtureFactor.h @@ -0,0 +1,244 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file MixtureFactor.h + * @brief Nonlinear Mixture factor of continuous and discrete. + * @author Kevin Doherty, kdoherty@mit.edu + * @author Varun Agrawal + * @date December 2021 + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace gtsam { + +/** + * @brief Implementation of a discrete conditional mixture factor.
+ * + * Implements a joint discrete-continuous factor where the discrete variable + * serves to "select" a mixture component corresponding to a NonlinearFactor + * type of measurement. + * + * This class stores all factors as HybridFactors which can then be typecast to + * one of (NonlinearFactor, GaussianFactor) which can then be checked to perform + * the correct operation. + */ +class MixtureFactor : public HybridFactor { + public: + using Base = HybridFactor; + using This = MixtureFactor; + using shared_ptr = boost::shared_ptr; + using sharedFactor = boost::shared_ptr; + + /** + * @brief typedef for DecisionTree which has Keys as node labels and + * NonlinearFactor as leaf nodes. + */ + using Factors = DecisionTree; + + private: + /// Decision tree of Gaussian factors indexed by discrete keys. + Factors factors_; + bool normalized_; + + public: + MixtureFactor() = default; + + /** + * @brief Construct from Decision tree. + * + * @param keys Vector of keys for continuous factors. + * @param discreteKeys Vector of discrete keys. + * @param factors Decision tree with of shared factors. + * @param normalized Flag indicating if the factor error is already + * normalized. + */ + MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys, + const Factors& factors, bool normalized = false) + : Base(keys, discreteKeys), factors_(factors), normalized_(normalized) {} + + /** + * @brief Convenience constructor that generates the underlying factor + * decision tree for us. + * + * Here it is important that the vector of factors has the correct number of + * elements based on the number of discrete keys and the cardinality of the + * keys, so that the decision tree is constructed appropriately. + * + * @tparam FACTOR The type of the factor shared pointers being passed in. Will + * be typecast to NonlinearFactor shared pointers. + * @param keys Vector of keys for continuous factors. + * @param discreteKeys Vector of discrete keys. 
+ * @param factors Vector of shared pointers to factors. + * @param normalized Flag indicating if the factor error is already + * normalized. + */ + template + MixtureFactor(const KeyVector& keys, const DiscreteKeys& discreteKeys, + const std::vector>& factors, + bool normalized = false) + : Base(keys, discreteKeys), normalized_(normalized) { + std::vector nonlinear_factors; + for (auto&& f : factors) { + nonlinear_factors.push_back( + boost::dynamic_pointer_cast(f)); + } + factors_ = Factors(discreteKeys, nonlinear_factors); + } + + ~MixtureFactor() = default; + + /** + * @brief Compute error of factor given both continuous and discrete values. + * + * @param continuousVals The continuous Values. + * @param discreteVals The discrete Values. + * @return double The error of this factor. + */ + double error(const Values& continuousVals, + const DiscreteValues& discreteVals) const { + // Retrieve the factor corresponding to the assignment in discreteVals. + auto factor = factors_(discreteVals); + // Compute the error for the selected factor + const double factorError = factor->error(continuousVals); + if (normalized_) return factorError; + return factorError + + this->nonlinearFactorLogNormalizingConstant(factor, continuousVals); + } + + size_t dim() const { + // TODO(Varun) + throw std::runtime_error("MixtureFactor::dim not implemented."); + } + + /// Testable + /// @{ + + /// print to stdout + void print( + const std::string& s = "MixtureFactor", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { + std::cout << (s.empty() ? 
"" : s + " "); + Base::print("", keyFormatter); + std::cout << "\nMixtureFactor\n"; + auto valueFormatter = [](const sharedFactor& v) { + if (v) { + return (boost::format("Nonlinear factor on %d keys") % v->size()).str(); + } else { + return std::string("nullptr"); + } + }; + factors_.print("", keyFormatter, valueFormatter); + } + + /// Check equality + bool equals(const HybridFactor& other, double tol = 1e-9) const override { + // We attempt a dynamic cast from HybridFactor to MixtureFactor. If it + // fails, return false. + if (!dynamic_cast(&other)) return false; + + // If the cast is successful, we'll properly construct a MixtureFactor + // object from `other` + const MixtureFactor& f(static_cast(other)); + + // Ensure that this MixtureFactor and `f` have the same `factors_`. + auto compare = [tol](const sharedFactor& a, const sharedFactor& b) { + return traits::Equals(*a, *b, tol); + }; + if (!factors_.equals(f.factors_, compare)) return false; + + // If everything above passes, and the keys_, discreteKeys_ and normalized_ + // member variables are identical, return true. + return (std::equal(keys_.begin(), keys_.end(), f.keys().begin()) && + (discreteKeys_ == f.discreteKeys_) && + (normalized_ == f.normalized_)); + } + + /// @} + + /// Linearize specific nonlinear factors based on the assignment in + /// discreteValues. + GaussianFactor::shared_ptr linearize( + const Values& continuousVals, const DiscreteValues& discreteVals) const { + auto factor = factors_(discreteVals); + return factor->linearize(continuousVals); + } + + /// Linearize all the continuous factors to get a GaussianMixtureFactor. 
+ boost::shared_ptr linearize( + const Values& continuousVals) const { + // functional to linearize each factor in the decision tree + auto linearizeDT = [continuousVals](const sharedFactor& factor) { + return factor->linearize(continuousVals); + }; + + DecisionTree linearized_factors( + factors_, linearizeDT); + + return boost::make_shared( + continuousKeys_, discreteKeys_, linearized_factors); + } + + /** + * If the component factors are not already normalized, we want to compute + * their normalizing constants so that the resulting joint distribution is + * appropriately computed. Remember, this is the _negative_ normalizing + * constant for the measurement likelihood (since we are minimizing the + * _negative_ log-likelihood). + */ + double nonlinearFactorLogNormalizingConstant(const sharedFactor& factor, + const Values& values) const { + // Information matrix (inverse covariance matrix) for the factor. + Matrix infoMat; + + // If this is a NoiseModelFactor, we'll use its noiseModel to + // otherwise noiseModelFactor will be nullptr + if (auto noiseModelFactor = + boost::dynamic_pointer_cast(factor)) { + // If dynamic cast to NoiseModelFactor succeeded, see if the noise model + // is Gaussian + auto noiseModel = noiseModelFactor->noiseModel(); + + auto gaussianNoiseModel = + boost::dynamic_pointer_cast(noiseModel); + if (gaussianNoiseModel) { + // If the noise model is Gaussian, retrieve the information matrix + infoMat = gaussianNoiseModel->information(); + } else { + // If the factor is not a Gaussian factor, we'll linearize it to get + // something with a normalized noise model + // TODO(kevin): does this make sense to do? I think maybe not in + // general? Should we just yell at the user? 
+ auto gaussianFactor = factor->linearize(values); + infoMat = gaussianFactor->information(); + } + } + + // Compute the (negative) log of the normalizing constant + return -(factor->dim() * log(2.0 * M_PI) / 2.0) - + (log(infoMat.determinant()) / 2.0); + } +}; + +} // namespace gtsam diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index 38a327a58..52ef0439e 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -21,10 +21,16 @@ #include #include #include +#include +#include #include #include +#include +#include #include +#include + #pragma once namespace gtsam { @@ -33,111 +39,6 @@ using symbol_shorthand::C; using symbol_shorthand::M; using symbol_shorthand::X; -/* ****************************************************************************/ -// Test fixture with switching network. -// TODO(Varun) Currently this is only linear. We need to add nonlinear support -// and then update to -// https://github.com/borglab/gtsam/pull/973/files#diff-58c02b3b197ebf731694946e87762d252e9eaa2f5c6c4ba22d618085b321ca23 -struct Switching { - size_t K; - DiscreteKeys modes; - HybridGaussianFactorGraph linearizedFactorGraph; - Values linearizationPoint; - - using MotionModel = BetweenFactor; - // using MotionMixture = MixtureFactor; - - /// Create with given number of time steps. - Switching(size_t K, double between_sigma = 1.0, double prior_sigma = 0.1) - : K(K) { - // Create DiscreteKeys for binary K modes, modes[0] will not be used. - modes = addDiscreteModes(K); - - // Create hybrid factor graph. - // Add a prior on X(1). - auto prior = boost::make_shared( - X(1), Matrix11::Ones() / prior_sigma, Vector1::Zero()); - linearizedFactorGraph.push_back(prior); - - // Add "motion models". 
- linearizedFactorGraph = addMotionModels(K); - - // Add measurement factors - for (size_t k = 1; k <= K; k++) { - linearizedFactorGraph.emplace_gaussian( - X(k), Matrix11::Ones() / 0.1, Vector1::Zero()); - } - - // Add "mode chain" - linearizedFactorGraph = addModeChain(linearizedFactorGraph); - - // Create the linearization point. - for (size_t k = 1; k <= K; k++) { - linearizationPoint.insert(X(k), static_cast(k)); - } - } - - /// Create DiscreteKeys for K binary modes. - DiscreteKeys addDiscreteModes(size_t K) { - DiscreteKeys m; - for (size_t k = 0; k <= K; k++) { - m.emplace_back(M(k), 2); - } - return m; - } - - /// Helper function to add motion models for each [k, k+1] interval. - HybridGaussianFactorGraph addMotionModels(size_t K) { - HybridGaussianFactorGraph hgfg; - for (size_t k = 1; k < K; k++) { - auto keys = {X(k), X(k + 1)}; - auto components = motionModels(k); - hgfg.emplace_hybrid(keys, DiscreteKeys{modes[k]}, - components); - } - return hgfg; - } - - // Create motion models for a given time step - static std::vector motionModels( - size_t k, double sigma = 1.0) { - auto noise_model = noiseModel::Isotropic::Sigma(1, sigma); - auto still = boost::make_shared( - X(k), -Matrix11::Ones() / sigma, X(k + 1), - Matrix11::Ones() / sigma, Vector1::Zero()), - moving = boost::make_shared( - X(k), -Matrix11::Ones() / sigma, X(k + 1), - Matrix11::Ones() / sigma, -Vector1::Ones() / sigma); - return {boost::dynamic_pointer_cast(still), - boost::dynamic_pointer_cast(moving)}; - } - - // // Add "mode chain" to NonlinearHybridFactorGraph - // void addModeChain(HybridNonlinearFactorGraph& fg) { - // auto prior = boost::make_shared(modes[1], "1/1"); - // fg.push_discrete(prior); - // for (size_t k = 1; k < K - 1; k++) { - // auto parents = {modes[k]}; - // auto conditional = boost::make_shared( - // modes[k + 1], parents, "1/2 3/2"); - // fg.push_discrete(conditional); - // } - // } - - // Add "mode chain" to GaussianHybridFactorGraph - HybridGaussianFactorGraph 
addModeChain(HybridGaussianFactorGraph& fg) { - auto prior = boost::make_shared(modes[1], "1/1"); - fg.push_discrete(prior); - for (size_t k = 1; k < K - 1; k++) { - auto parents = {modes[k]}; - auto conditional = boost::make_shared( - modes[k + 1], parents, "1/2 3/2"); - fg.push_discrete(conditional); - } - return fg; - } -}; - /** * @brief Create a switching system chain. A switching system is a continuous * system which depends on a discrete mode at each time step of the chain. @@ -149,7 +50,7 @@ struct Switching { */ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( size_t n, std::function keyFunc = X, - std::function dKeyFunc = C) { + std::function dKeyFunc = M) { HybridGaussianFactorGraph hfg; hfg.add(JacobianFactor(keyFunc(1), I_3x3, Z_3x1)); @@ -182,7 +83,7 @@ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( * @return std::pair> */ inline std::pair> makeBinaryOrdering( - std::vector& input) { + std::vector &input) { KeyVector new_order; std::vector levels(input.size()); @@ -211,4 +112,103 @@ inline std::pair> makeBinaryOrdering( return {new_order, levels}; } +/* *************************************************************************** + */ +using MotionModel = BetweenFactor; +// using MotionMixture = MixtureFactor; + +// Test fixture with switching network. +struct Switching { + size_t K; + DiscreteKeys modes; + HybridNonlinearFactorGraph nonlinearFactorGraph; + HybridGaussianFactorGraph linearizedFactorGraph; + Values linearizationPoint; + + /// Create with given number of time steps. + Switching(size_t K, double between_sigma = 1.0, double prior_sigma = 0.1) + : K(K) { + using symbol_shorthand::M; + using symbol_shorthand::X; + + // Create DiscreteKeys for binary K modes, modes[0] will not be used. + for (size_t k = 0; k <= K; k++) { + modes.emplace_back(M(k), 2); + } + + // Create hybrid factor graph. + // Add a prior on X(1). 
+ auto prior = boost::make_shared>( + X(1), 0, noiseModel::Isotropic::Sigma(1, prior_sigma)); + nonlinearFactorGraph.push_nonlinear(prior); + + // Add "motion models". + for (size_t k = 1; k < K; k++) { + KeyVector keys = {X(k), X(k + 1)}; + auto motion_models = motionModels(k); + std::vector components; + for (auto &&f : motion_models) { + components.push_back(boost::dynamic_pointer_cast(f)); + } + nonlinearFactorGraph.emplace_hybrid( + keys, DiscreteKeys{modes[k]}, components); + } + + // Add measurement factors + auto measurement_noise = noiseModel::Isotropic::Sigma(1, 0.1); + for (size_t k = 2; k <= K; k++) { + nonlinearFactorGraph.emplace_nonlinear>( + X(k), 1.0 * (k - 1), measurement_noise); + } + + // Add "mode chain" + addModeChain(&nonlinearFactorGraph); + + // Create the linearization point. + for (size_t k = 1; k <= K; k++) { + linearizationPoint.insert(X(k), static_cast(k)); + } + + linearizedFactorGraph = nonlinearFactorGraph.linearize(linearizationPoint); + } + + // Create motion models for a given time step + static std::vector motionModels(size_t k, + double sigma = 1.0) { + using symbol_shorthand::M; + using symbol_shorthand::X; + + auto noise_model = noiseModel::Isotropic::Sigma(1, sigma); + auto still = + boost::make_shared(X(k), X(k + 1), 0.0, noise_model), + moving = + boost::make_shared(X(k), X(k + 1), 1.0, noise_model); + return {still, moving}; + } + + // Add "mode chain" to HybridNonlinearFactorGraph + void addModeChain(HybridNonlinearFactorGraph *fg) { + auto prior = boost::make_shared(modes[1], "1/1"); + fg->push_discrete(prior); + for (size_t k = 1; k < K - 1; k++) { + auto parents = {modes[k]}; + auto conditional = boost::make_shared( + modes[k + 1], parents, "1/2 3/2"); + fg->push_discrete(conditional); + } + } + + // Add "mode chain" to HybridGaussianFactorGraph + void addModeChain(HybridGaussianFactorGraph *fg) { + auto prior = boost::make_shared(modes[1], "1/1"); + fg->push_discrete(prior); + for (size_t k = 1; k < K - 1; k++) { + 
auto parents = {modes[k]}; + auto conditional = boost::make_shared( + modes[k + 1], parents, "1/2 3/2"); + fg->push_discrete(conditional); + } + } +}; + } // namespace gtsam diff --git a/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp b/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp index 17a2d94d7..e4bd0e084 100644 --- a/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp +++ b/gtsam/hybrid/tests/testGaussianHybridFactorGraph.cpp @@ -54,8 +54,8 @@ using namespace boost::assign; using namespace std; using namespace gtsam; -using gtsam::symbol_shorthand::C; using gtsam::symbol_shorthand::D; +using gtsam::symbol_shorthand::M; using gtsam::symbol_shorthand::X; using gtsam::symbol_shorthand::Y; @@ -69,9 +69,9 @@ TEST(HybridGaussianFactorGraph, Creation) { // Define a gaussian mixture conditional P(x0|x1, c0) and add it to the factor // graph - GaussianMixture gm({X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{C(0), 2}), + GaussianMixture gm({X(0)}, {X(1)}, DiscreteKeys(DiscreteKey{M(0), 2}), GaussianMixture::Conditionals( - C(0), + M(0), boost::make_shared( X(0), Z_3x1, I_3x3, X(1), I_3x3), boost::make_shared( @@ -98,11 +98,11 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { // Test multifrontal elimination HybridGaussianFactorGraph hfg; - DiscreteKey c(C(1), 2); + DiscreteKey m(M(1), 2); // Add priors on x0 and c1 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8}))); + hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8}))); Ordering ordering; ordering.push_back(X(0)); @@ -116,7 +116,7 @@ TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { HybridGaussianFactorGraph hfg; - DiscreteKey c1(C(1), 2); + DiscreteKey m1(M(1), 2); // Add prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); @@ -125,45 +125,45 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialEqualChance) { // Add a gaussian mixture factor Ï•(x1, c1) 
DecisionTree dt( - C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + M(1), boost::make_shared(X(1), I_3x3, Z_3x1), boost::make_shared(X(1), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + hfg.add(GaussianMixtureFactor({X(1)}, {m1}, dt)); auto result = - hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {C(1)})); + hfg.eliminateSequential(Ordering::ColamdConstrainedLast(hfg, {M(1)})); auto dc = result->at(2)->asDiscreteConditional(); - DiscreteValues mode; - mode[C(1)] = 0; - EXPECT_DOUBLES_EQUAL(0.6225, (*dc)(mode), 1e-3); + DiscreteValues dv; + dv[M(1)] = 0; + EXPECT_DOUBLES_EQUAL(1, dc->operator()(dv), 1e-3); } /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { HybridGaussianFactorGraph hfg; - DiscreteKey c1(C(1), 2); + DiscreteKey m1(M(1), 2); // Add prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); // Add factor between x0 and x1 hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - DecisionTree dt( - C(1), boost::make_shared(X(1), I_3x3, Z_3x1), - boost::make_shared(X(1), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(1)}, {c1}, dt)); + std::vector factors = { + boost::make_shared(X(1), I_3x3, Z_3x1), + boost::make_shared(X(1), I_3x3, Vector3::Ones())}; + hfg.add(GaussianMixtureFactor({X(1)}, {m1}, factors)); // Discrete probability table for c1 - hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c1, {2, 8}))); + hfg.add(DecisionTreeFactor(m1, {2, 8})); // Joint discrete probability table for c1, c2 - hfg.add(HybridDiscreteFactor( - DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4"))); + hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - auto result = hfg.eliminateSequential( - Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)})); + HybridBayesNet::shared_ptr result = hfg.eliminateSequential( + Ordering::ColamdConstrainedLast(hfg, {M(1), M(2)})); + // There are 4 variables (2 
continuous + 2 discrete) in the bayes net. EXPECT_LONGS_EQUAL(4, result->size()); } @@ -171,31 +171,33 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { HybridGaussianFactorGraph hfg; - DiscreteKey c1(C(1), 2); + DiscreteKey m1(M(1), 2); hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); hfg.add(GaussianMixtureFactor::FromFactors( - {X(1)}, {{C(1), 2}}, + {X(1)}, {{M(1), 2}}, {boost::make_shared(X(1), I_3x3, Z_3x1), boost::make_shared(X(1), I_3x3, Vector3::Ones())})); - hfg.add(DecisionTreeFactor(c1, {2, 8})); - hfg.add(DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4")); + hfg.add(DecisionTreeFactor(m1, {2, 8})); + hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - auto result = hfg.eliminateMultifrontal( - Ordering::ColamdConstrainedLast(hfg, {C(1), C(2)})); + HybridBayesTree::shared_ptr result = hfg.eliminateMultifrontal( + Ordering::ColamdConstrainedLast(hfg, {M(1), M(2)})); + // The bayes tree should have 3 cliques + EXPECT_LONGS_EQUAL(3, result->size()); // GTSAM_PRINT(*result); - // GTSAM_PRINT(*result->marginalFactor(C(2))); + // GTSAM_PRINT(*result->marginalFactor(M(2))); } /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { HybridGaussianFactorGraph hfg; - DiscreteKey c(C(1), 2); + DiscreteKey m(M(1), 2); // Prior on x0 hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); @@ -204,16 +206,16 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { // Decision tree with different modes on x1 DecisionTree dt( - C(1), boost::make_shared(X(1), I_3x3, Z_3x1), + M(1), boost::make_shared(X(1), I_3x3, Z_3x1), boost::make_shared(X(1), I_3x3, Vector3::Ones())); // Hybrid factor P(x1|c1) - hfg.add(GaussianMixtureFactor({X(1)}, {c}, dt)); + hfg.add(GaussianMixtureFactor({X(1)}, {m}, dt)); // Prior factor on c1 - 
hfg.add(HybridDiscreteFactor(DecisionTreeFactor(c, {2, 8}))); + hfg.add(HybridDiscreteFactor(DecisionTreeFactor(m, {2, 8}))); // Get a constrained ordering keeping c1 last - auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {C(1)}); + auto ordering_full = Ordering::ColamdConstrainedLast(hfg, {M(1)}); // Returns a Hybrid Bayes Tree with distribution P(x0|x1)P(x1|c1)P(c1) HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full); @@ -234,48 +236,48 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { { hfg.add(GaussianMixtureFactor::FromFactors( - {X(0)}, {{C(0), 2}}, + {X(0)}, {{M(0), 2}}, {boost::make_shared(X(0), I_3x3, Z_3x1), boost::make_shared(X(0), I_3x3, Vector3::Ones())})); DecisionTree dt1( - C(1), boost::make_shared(X(2), I_3x3, Z_3x1), + M(1), boost::make_shared(X(2), I_3x3, Z_3x1), boost::make_shared(X(2), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(2)}, {{C(1), 2}}, dt1)); + hfg.add(GaussianMixtureFactor({X(2)}, {{M(1), 2}}, dt1)); } hfg.add(HybridDiscreteFactor( - DecisionTreeFactor({{C(1), 2}, {C(2), 2}}, "1 2 3 4"))); + DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4"))); hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); { DecisionTree dt( - C(3), boost::make_shared(X(3), I_3x3, Z_3x1), + M(3), boost::make_shared(X(3), I_3x3, Z_3x1), boost::make_shared(X(3), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(3)}, {{C(3), 2}}, dt)); + hfg.add(GaussianMixtureFactor({X(3)}, {{M(3), 2}}, dt)); DecisionTree dt1( - C(2), boost::make_shared(X(5), I_3x3, Z_3x1), + M(2), boost::make_shared(X(5), I_3x3, Z_3x1), boost::make_shared(X(5), I_3x3, Vector3::Ones())); - hfg.add(GaussianMixtureFactor({X(5)}, {{C(2), 2}}, dt1)); + hfg.add(GaussianMixtureFactor({X(5)}, {{M(2), 2}}, dt1)); } auto ordering_full = - Ordering::ColamdConstrainedLast(hfg, {C(0), C(1), C(2), C(3)}); + Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), 
M(2), M(3)}); HybridBayesTree::shared_ptr hbt; HybridGaussianFactorGraph::shared_ptr remaining; std::tie(hbt, remaining) = hfg.eliminatePartialMultifrontal(ordering_full); - // GTSAM_PRINT(*hbt); - // GTSAM_PRINT(*remaining); + // 9 cliques in the bayes tree and 0 remaining variables to eliminate. + EXPECT_LONGS_EQUAL(9, hbt->size()); + EXPECT_LONGS_EQUAL(0, remaining->size()); - // hbt->marginalFactor(X(1))->print("HBT: "); /* (Fan) Explanation: the Junction tree will need to reeliminate to get to the marginal on X(1), which is not possible because it involves eliminating @@ -309,13 +311,13 @@ TEST(HybridGaussianFactorGraph, Switching) { // X(3), X(7) // X(2), X(8) // X(1), X(4), X(6), X(9) - // C(5) will be the center, C(1-4), C(6-8) - // C(3), C(7) - // C(1), C(4), C(2), C(6), C(8) + // M(5) will be the center, M(1-4), M(6-8) + // M(3), M(7) + // M(1), M(4), M(2), M(6), M(8) // auto ordering_full = // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), // X(5), - // C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)}); + // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); KeyVector ordering; { @@ -338,7 +340,7 @@ TEST(HybridGaussianFactorGraph, Switching) { std::iota(naturalC.begin(), naturalC.end(), 1); std::vector ordC; std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), - [](int x) { return C(x); }); + [](int x) { return M(x); }); KeyVector ndC; std::vector lvls; @@ -355,9 +357,9 @@ TEST(HybridGaussianFactorGraph, Switching) { HybridGaussianFactorGraph::shared_ptr remaining; std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); - // GTSAM_PRINT(*hbt); - // GTSAM_PRINT(*remaining); - // hbt->marginalFactor(C(11))->print("HBT: "); + // 12 cliques in the bayes tree and 0 remaining variables to eliminate. 
+ EXPECT_LONGS_EQUAL(12, hbt->size()); + EXPECT_LONGS_EQUAL(0, remaining->size()); } /* ************************************************************************* */ @@ -370,13 +372,13 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { // X(3), X(7) // X(2), X(8) // X(1), X(4), X(6), X(9) - // C(5) will be the center, C(1-4), C(6-8) - // C(3), C(7) - // C(1), C(4), C(2), C(6), C(8) + // M(5) will be the center, M(1-4), M(6-8) + // M(3), M(7) + // M(1), M(4), M(2), M(6), M(8) // auto ordering_full = // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), // X(5), - // C(1), C(4), C(2), C(6), C(8), C(3), C(7), C(5)}); + // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); KeyVector ordering; { @@ -399,7 +401,7 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { std::iota(naturalC.begin(), naturalC.end(), 1); std::vector ordC; std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), - [](int x) { return C(x); }); + [](int x) { return M(x); }); KeyVector ndC; std::vector lvls; @@ -409,9 +411,6 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { } auto ordering_full = Ordering(ordering); - // GTSAM_PRINT(*hfg); - // GTSAM_PRINT(ordering_full); - HybridBayesTree::shared_ptr hbt; HybridGaussianFactorGraph::shared_ptr remaining; std::tie(hbt, remaining) = hfg->eliminatePartialMultifrontal(ordering_full); @@ -419,19 +418,18 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { auto new_fg = makeSwitchingChain(12); auto isam = HybridGaussianISAM(*hbt); - { - HybridGaussianFactorGraph factorGraph; - factorGraph.push_back(new_fg->at(new_fg->size() - 2)); - factorGraph.push_back(new_fg->at(new_fg->size() - 1)); - isam.update(factorGraph); - // std::cout << isam.dot(); - // isam.marginalFactor(C(11))->print(); - } + // Run an ISAM update. 
+ HybridGaussianFactorGraph factorGraph; + factorGraph.push_back(new_fg->at(new_fg->size() - 2)); + factorGraph.push_back(new_fg->at(new_fg->size() - 1)); + isam.update(factorGraph); + + // ISAM should have 12 factors after the last update + EXPECT_LONGS_EQUAL(12, isam.size()); } /* ************************************************************************* */ -// TODO(Varun) Actually test something! -TEST_DISABLED(HybridGaussianFactorGraph, SwitchingTwoVar) { +TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { const int N = 7; auto hfg = makeSwitchingChain(N, X); hfg->push_back(*makeSwitchingChain(N, Y, D)); @@ -451,7 +449,7 @@ TEST_DISABLED(HybridGaussianFactorGraph, SwitchingTwoVar) { } for (size_t i = 1; i <= N - 1; i++) { - ordX.emplace_back(C(i)); + ordX.emplace_back(M(i)); } for (size_t i = 1; i <= N - 1; i++) { ordX.emplace_back(D(i)); @@ -463,8 +461,8 @@ TEST_DISABLED(HybridGaussianFactorGraph, SwitchingTwoVar) { dw.positionHints['c'] = 0; dw.positionHints['d'] = 3; dw.positionHints['y'] = 2; - std::cout << hfg->dot(DefaultKeyFormatter, dw); - std::cout << "\n"; + // std::cout << hfg->dot(DefaultKeyFormatter, dw); + // std::cout << "\n"; } { @@ -473,10 +471,10 @@ TEST_DISABLED(HybridGaussianFactorGraph, SwitchingTwoVar) { // dw.positionHints['c'] = 0; // dw.positionHints['d'] = 3; dw.positionHints['x'] = 1; - std::cout << "\n"; + // std::cout << "\n"; // std::cout << hfg->eliminateSequential(Ordering(ordX)) // ->dot(DefaultKeyFormatter, dw); - hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout); + // hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout); } Ordering ordering_partial; @@ -484,22 +482,22 @@ TEST_DISABLED(HybridGaussianFactorGraph, SwitchingTwoVar) { ordering_partial.emplace_back(X(i)); ordering_partial.emplace_back(Y(i)); } - { - HybridBayesNet::shared_ptr hbn; - HybridGaussianFactorGraph::shared_ptr remaining; - std::tie(hbn, remaining) = - hfg->eliminatePartialSequential(ordering_partial); + HybridBayesNet::shared_ptr hbn; + 
HybridGaussianFactorGraph::shared_ptr remaining; + std::tie(hbn, remaining) = + hfg->eliminatePartialSequential(ordering_partial); - // remaining->print(); - { - DotWriter dw; - dw.positionHints['x'] = 1; - dw.positionHints['c'] = 0; - dw.positionHints['d'] = 3; - dw.positionHints['y'] = 2; - std::cout << remaining->dot(DefaultKeyFormatter, dw); - std::cout << "\n"; - } + EXPECT_LONGS_EQUAL(14, hbn->size()); + EXPECT_LONGS_EQUAL(11, remaining->size()); + + { + DotWriter dw; + dw.positionHints['x'] = 1; + dw.positionHints['c'] = 0; + dw.positionHints['d'] = 3; + dw.positionHints['y'] = 2; + // std::cout << remaining->dot(DefaultKeyFormatter, dw); + // std::cout << "\n"; } } diff --git a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp index 36477218b..cb9068c30 100644 --- a/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp +++ b/gtsam/hybrid/tests/testGaussianMixtureFactor.cpp @@ -74,7 +74,7 @@ TEST(GaussianMixtureFactor, Sum) { // Check that number of keys is 3 EXPECT_LONGS_EQUAL(3, mixtureFactorA.keys().size()); - // Check that number of discrete keys is 1 // TODO(Frank): should not exist? 
+ // Check that number of discrete keys is 1 EXPECT_LONGS_EQUAL(1, mixtureFactorA.discreteKeys().size()); // Create sum of two mixture factors: it will be a decision tree now on both @@ -104,7 +104,7 @@ TEST(GaussianMixtureFactor, Printing) { GaussianMixtureFactor mixtureFactor({X(1), X(2)}, {m1}, factors); std::string expected = - R"(Hybrid x1 x2; 1 ]{ + R"(Hybrid [x1 x2; 1]{ Choice(1) 0 Leaf : A[x1] = [ diff --git a/gtsam/hybrid/tests/testHybridFactorGraph.cpp b/gtsam/hybrid/tests/testHybridFactorGraph.cpp new file mode 100644 index 000000000..f5b4ec0b1 --- /dev/null +++ b/gtsam/hybrid/tests/testHybridFactorGraph.cpp @@ -0,0 +1,45 @@ +/* ---------------------------------------------------------------------------- + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * See LICENSE for the license information + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridFactorGraph.cpp + * @brief Unit tests for HybridFactorGraph + * @author Varun Agrawal + * @date May 2021 + */ + +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::L; +using symbol_shorthand::M; +using symbol_shorthand::X; + +/* **************************************************************************** + * Test that any linearizedFactorGraph gaussian factors are appended to the + * existing gaussian factor graph in the hybrid factor graph. 
+ */ +TEST(HybridFactorGraph, Constructor) { + // Initialize the hybrid factor graph + HybridFactorGraph fg; +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridIncremental.cpp b/gtsam/hybrid/tests/testHybridIncremental.cpp new file mode 100644 index 000000000..4449aba0b --- /dev/null +++ b/gtsam/hybrid/tests/testHybridIncremental.cpp @@ -0,0 +1,563 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridIncremental.cpp + * @brief Unit tests for incremental inference + * @author Fan Jiang, Varun Agrawal, Frank Dellaert + * @date Jan 2021 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "Switching.h" + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::L; +using symbol_shorthand::M; +using symbol_shorthand::W; +using symbol_shorthand::X; +using symbol_shorthand::Y; +using symbol_shorthand::Z; + +/* ****************************************************************************/ +// Test if we can perform elimination incrementally. 
+TEST(HybridGaussianElimination, IncrementalElimination) { + Switching switching(3); + HybridGaussianISAM isam; + HybridGaussianFactorGraph graph1; + + // Create initial factor graph + // * * * + // | | | + // X1 -*- X2 -*- X3 + // \*-M1-*/ + graph1.push_back(switching.linearizedFactorGraph.at(0)); // P(X1) + graph1.push_back(switching.linearizedFactorGraph.at(1)); // P(X1, X2 | M1) + graph1.push_back(switching.linearizedFactorGraph.at(2)); // P(X2, X3 | M2) + graph1.push_back(switching.linearizedFactorGraph.at(5)); // P(M1) + + // Run update step + isam.update(graph1); + + // Check that after update we have 3 hybrid Bayes net nodes: + // P(X1 | X2, M1) and P(X2, X3 | M1, M2), P(M1, M2) + EXPECT_LONGS_EQUAL(3, isam.size()); + EXPECT(isam[X(1)]->conditional()->frontals() == KeyVector{X(1)}); + EXPECT(isam[X(1)]->conditional()->parents() == KeyVector({X(2), M(1)})); + EXPECT(isam[X(2)]->conditional()->frontals() == KeyVector({X(2), X(3)})); + EXPECT(isam[X(2)]->conditional()->parents() == KeyVector({M(1), M(2)})); + + /********************************************************/ + // New factor graph for incremental update. 
+ HybridGaussianFactorGraph graph2; + + graph1.push_back(switching.linearizedFactorGraph.at(3)); // P(X2) + graph2.push_back(switching.linearizedFactorGraph.at(4)); // P(X3) + graph2.push_back(switching.linearizedFactorGraph.at(6)); // P(M1, M2) + + isam.update(graph2); + + // Check that after the second update we have + // 1 additional hybrid Bayes net node: + // P(X2, X3 | M1, M2) + EXPECT_LONGS_EQUAL(3, isam.size()); + EXPECT(isam[X(3)]->conditional()->frontals() == KeyVector({X(2), X(3)})); + EXPECT(isam[X(3)]->conditional()->parents() == KeyVector({M(1), M(2)})); +} + +/* ****************************************************************************/ +// Test if we can incrementally do the inference +TEST(HybridGaussianElimination, IncrementalInference) { + Switching switching(3); + HybridGaussianISAM isam; + HybridGaussianFactorGraph graph1; + + // Create initial factor graph + // * * * + // | | | + // X1 -*- X2 -*- X3 + // | | + // *-M1 - * - M2 + graph1.push_back(switching.linearizedFactorGraph.at(0)); // P(X1) + graph1.push_back(switching.linearizedFactorGraph.at(1)); // P(X1, X2 | M1) + graph1.push_back(switching.linearizedFactorGraph.at(3)); // P(X2) + graph1.push_back(switching.linearizedFactorGraph.at(5)); // P(M1) + + // Run update step + isam.update(graph1); + + auto discreteConditional_m1 = + isam[M(1)]->conditional()->asDiscreteConditional(); + EXPECT(discreteConditional_m1->keys() == KeyVector({M(1)})); + + /********************************************************/ + // New factor graph for incremental update. + HybridGaussianFactorGraph graph2; + + graph2.push_back(switching.linearizedFactorGraph.at(2)); // P(X2, X3 | M2) + graph2.push_back(switching.linearizedFactorGraph.at(4)); // P(X3) + graph2.push_back(switching.linearizedFactorGraph.at(6)); // P(M1, M2) + + isam.update(graph2); + + /********************************************************/ + // Run batch elimination so we can compare results. 
+ Ordering ordering; + ordering += X(1); + ordering += X(2); + ordering += X(3); + + // Now we calculate the actual factors using full elimination + HybridBayesTree::shared_ptr expectedHybridBayesTree; + HybridGaussianFactorGraph::shared_ptr expectedRemainingGraph; + std::tie(expectedHybridBayesTree, expectedRemainingGraph) = + switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); + + // The densities on X(1) should be the same + auto x1_conditional = + dynamic_pointer_cast(isam[X(1)]->conditional()->inner()); + auto actual_x1_conditional = dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(1)]->conditional()->inner()); + EXPECT(assert_equal(*x1_conditional, *actual_x1_conditional)); + + // The densities on X(2) should be the same + auto x2_conditional = + dynamic_pointer_cast(isam[X(2)]->conditional()->inner()); + auto actual_x2_conditional = dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); + EXPECT(assert_equal(*x2_conditional, *actual_x2_conditional)); + + // The densities on X(3) should be the same + auto x3_conditional = + dynamic_pointer_cast(isam[X(3)]->conditional()->inner()); + auto actual_x3_conditional = dynamic_pointer_cast( + (*expectedHybridBayesTree)[X(2)]->conditional()->inner()); + EXPECT(assert_equal(*x3_conditional, *actual_x3_conditional)); + + // We only perform manual continuous elimination for 0,0. 
+ // The other discrete probabilities on M(2) are calculated the same way + Ordering discrete_ordering; + discrete_ordering += M(1); + discrete_ordering += M(2); + HybridBayesTree::shared_ptr discreteBayesTree = + expectedRemainingGraph->eliminateMultifrontal(discrete_ordering); + + DiscreteValues m00; + m00[M(1)] = 0, m00[M(2)] = 0; + DiscreteConditional decisionTree = + *(*discreteBayesTree)[M(2)]->conditional()->asDiscreteConditional(); + double m00_prob = decisionTree(m00); + + auto discreteConditional = isam[M(2)]->conditional()->asDiscreteConditional(); + + // Test if the probability values are as expected with regression tests. + DiscreteValues assignment; + EXPECT(assert_equal(m00_prob, 0.0619233, 1e-5)); + assignment[M(1)] = 0; + assignment[M(2)] = 0; + EXPECT(assert_equal(m00_prob, (*discreteConditional)(assignment), 1e-5)); + assignment[M(1)] = 1; + assignment[M(2)] = 0; + EXPECT(assert_equal(0.183743, (*discreteConditional)(assignment), 1e-5)); + assignment[M(1)] = 0; + assignment[M(2)] = 1; + EXPECT(assert_equal(0.204159, (*discreteConditional)(assignment), 1e-5)); + assignment[M(1)] = 1; + assignment[M(2)] = 1; + EXPECT(assert_equal(0.2, (*discreteConditional)(assignment), 1e-5)); + + // Check if the clique conditional generated from incremental elimination + // matches that of batch elimination. 
+ auto expectedChordal = expectedRemainingGraph->eliminateMultifrontal(); + auto expectedConditional = dynamic_pointer_cast( + (*expectedChordal)[M(2)]->conditional()->inner()); + auto actualConditional = dynamic_pointer_cast( + isam[M(2)]->conditional()->inner()); + EXPECT(assert_equal(*actualConditional, *expectedConditional, 1e-6)); +} + +/* ****************************************************************************/ +// Test if we can approximately do the inference +TEST(HybridGaussianElimination, Approx_inference) { + Switching switching(4); + HybridGaussianISAM incrementalHybrid; + HybridGaussianFactorGraph graph1; + + // Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4 + for (size_t i = 1; i < 4; i++) { + graph1.push_back(switching.linearizedFactorGraph.at(i)); + } + + // Add the Gaussian factors, 1 prior on X(1), + // 3 measurements on X(2), X(3), X(4) + graph1.push_back(switching.linearizedFactorGraph.at(0)); + for (size_t i = 4; i <= 7; i++) { + graph1.push_back(switching.linearizedFactorGraph.at(i)); + } + + // Create ordering. 
+ Ordering ordering; + for (size_t j = 1; j <= 4; j++) { + ordering += X(j); + } + + // Now we calculate the actual factors using full elimination + HybridBayesTree::shared_ptr unprunedHybridBayesTree; + HybridGaussianFactorGraph::shared_ptr unprunedRemainingGraph; + std::tie(unprunedHybridBayesTree, unprunedRemainingGraph) = + switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); + + size_t maxNrLeaves = 5; + incrementalHybrid.update(graph1); + + incrementalHybrid.prune(M(3), maxNrLeaves); + + /* + unpruned factor is: + Choice(m3) + 0 Choice(m2) + 0 0 Choice(m1) + 0 0 0 Leaf 0.11267528 + 0 0 1 Leaf 0.18576102 + 0 1 Choice(m1) + 0 1 0 Leaf 0.18754662 + 0 1 1 Leaf 0.30623871 + 1 Choice(m2) + 1 0 Choice(m1) + 1 0 0 Leaf 0.18576102 + 1 0 1 Leaf 0.30622428 + 1 1 Choice(m1) + 1 1 0 Leaf 0.30623871 + 1 1 1 Leaf 0.5 + + pruned factors is: + Choice(m3) + 0 Choice(m2) + 0 0 Leaf 0 + 0 1 Choice(m1) + 0 1 0 Leaf 0.18754662 + 0 1 1 Leaf 0.30623871 + 1 Choice(m2) + 1 0 Choice(m1) + 1 0 0 Leaf 0 + 1 0 1 Leaf 0.30622428 + 1 1 Choice(m1) + 1 1 0 Leaf 0.30623871 + 1 1 1 Leaf 0.5 + */ + + auto discreteConditional_m1 = *dynamic_pointer_cast( + incrementalHybrid[M(1)]->conditional()->inner()); + EXPECT(discreteConditional_m1.keys() == KeyVector({M(1), M(2), M(3)})); + + // Get the number of elements which are greater than 0. + auto count = [](const double &value, int count) { + return value > 0 ? count + 1 : count; + }; + // Check that the number of leaves after pruning is 5. + EXPECT_LONGS_EQUAL(5, discreteConditional_m1.fold(count, 0)); + + // Check that the hybrid nodes of the bayes net match those of the pre-pruning + // bayes net, at the same positions. 
+ auto &unprunedLastDensity = *dynamic_pointer_cast( + unprunedHybridBayesTree->clique(X(4))->conditional()->inner()); + auto &lastDensity = *dynamic_pointer_cast( + incrementalHybrid[X(4)]->conditional()->inner()); + + std::vector> assignments = + discreteConditional_m1.enumerate(); + // Loop over all assignments and check the pruned components + for (auto &&av : assignments) { + const DiscreteValues &assignment = av.first; + const double value = av.second; + + if (value == 0.0) { + EXPECT(lastDensity(assignment) == nullptr); + } else { + CHECK(lastDensity(assignment)); + EXPECT(assert_equal(*unprunedLastDensity(assignment), + *lastDensity(assignment))); + } + } +} + +/* ****************************************************************************/ +// Test approximate inference with an additional pruning step. +TEST(HybridGaussianElimination, Incremental_approximate) { + Switching switching(5); + HybridGaussianISAM incrementalHybrid; + HybridGaussianFactorGraph graph1; + + /***** Run Round 1 *****/ + // Add the 3 hybrid factors, x1-x2, x2-x3, x3-x4 + for (size_t i = 1; i < 4; i++) { + graph1.push_back(switching.linearizedFactorGraph.at(i)); + } + + // Add the Gaussian factors, 1 prior on X(1), + // 3 measurements on X(2), X(3), X(4) + graph1.push_back(switching.linearizedFactorGraph.at(0)); + for (size_t i = 5; i <= 7; i++) { + graph1.push_back(switching.linearizedFactorGraph.at(i)); + } + + // Run update with pruning + size_t maxComponents = 5; + incrementalHybrid.update(graph1); + incrementalHybrid.prune(M(3), maxComponents); + + // Check if we have a bayes tree with 4 hybrid nodes, + // each with 2, 4, 8, and 5 (pruned) leaves respetively. 
+ EXPECT_LONGS_EQUAL(4, incrementalHybrid.size()); + EXPECT_LONGS_EQUAL( + 2, incrementalHybrid[X(1)]->conditional()->asMixture()->nrComponents()); + EXPECT_LONGS_EQUAL( + 4, incrementalHybrid[X(2)]->conditional()->asMixture()->nrComponents()); + EXPECT_LONGS_EQUAL( + 5, incrementalHybrid[X(3)]->conditional()->asMixture()->nrComponents()); + EXPECT_LONGS_EQUAL( + 5, incrementalHybrid[X(4)]->conditional()->asMixture()->nrComponents()); + + /***** Run Round 2 *****/ + HybridGaussianFactorGraph graph2; + graph2.push_back(switching.linearizedFactorGraph.at(4)); + graph2.push_back(switching.linearizedFactorGraph.at(8)); + + // Run update with pruning a second time. + incrementalHybrid.update(graph2); + incrementalHybrid.prune(M(4), maxComponents); + + // Check if we have a bayes tree with pruned hybrid nodes, + // with 5 (pruned) leaves. + CHECK_EQUAL(5, incrementalHybrid.size()); + EXPECT_LONGS_EQUAL( + 5, incrementalHybrid[X(4)]->conditional()->asMixture()->nrComponents()); + EXPECT_LONGS_EQUAL( + 5, incrementalHybrid[X(5)]->conditional()->asMixture()->nrComponents()); +} + +/* ************************************************************************/ +// A GTSAM-only test for running inference on a single-legged robot. +// The leg links are represented by the chain X-Y-Z-W, where X is the base and +// W is the foot. +// We use BetweenFactor as constraints between each of the poses. +TEST(HybridGaussianISAM, NonTrivial) { + /*************** Run Round 1 ***************/ + HybridNonlinearFactorGraph fg; + + // Add a prior on pose x1 at the origin. 
+ // A prior factor consists of a mean and + // a noise model (covariance matrix) + Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin + auto priorNoise = noiseModel::Diagonal::Sigmas( + Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta + fg.emplace_nonlinear>(X(0), prior, priorNoise); + + // create a noise model for the landmark measurements + auto poseNoise = noiseModel::Isotropic::Sigma(3, 0.1); + + // We model a robot's single leg as X - Y - Z - W + // where X is the base link and W is the foot link. + + // Add connecting poses similar to PoseFactors in GTD + fg.emplace_nonlinear>(X(0), Y(0), Pose2(0, 1.0, 0), + poseNoise); + fg.emplace_nonlinear>(Y(0), Z(0), Pose2(0, 1.0, 0), + poseNoise); + fg.emplace_nonlinear>(Z(0), W(0), Pose2(0, 1.0, 0), + poseNoise); + + // Create initial estimate + Values initial; + initial.insert(X(0), Pose2(0.0, 0.0, 0.0)); + initial.insert(Y(0), Pose2(0.0, 1.0, 0.0)); + initial.insert(Z(0), Pose2(0.0, 2.0, 0.0)); + initial.insert(W(0), Pose2(0.0, 3.0, 0.0)); + + HybridGaussianFactorGraph gfg = fg.linearize(initial); + fg = HybridNonlinearFactorGraph(); + + HybridGaussianISAM inc; + + // Update without pruning + // The result is a HybridBayesNet with no discrete variables + // (equivalent to a GaussianBayesNet). + // Factorization is: + // `P(X | measurements) = P(W0|Z0) P(Z0|Y0) P(Y0|X0) P(X0)` + inc.update(gfg); + + /*************** Run Round 2 ***************/ + using PlanarMotionModel = BetweenFactor; + + // Add odometry factor with discrete modes. 
+ Pose2 odometry(1.0, 0.0, 0.0); + KeyVector contKeys = {W(0), W(1)}; + auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0); + auto still = boost::make_shared(W(0), W(1), Pose2(0, 0, 0), + noise_model), + moving = boost::make_shared(W(0), W(1), odometry, + noise_model); + std::vector components = {moving, still}; + auto mixtureFactor = boost::make_shared( + contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); + fg.push_back(mixtureFactor); + + // Add equivalent of ImuFactor + fg.emplace_nonlinear>(X(0), X(1), Pose2(1.0, 0.0, 0), + poseNoise); + // PoseFactors-like at k=1 + fg.emplace_nonlinear>(X(1), Y(1), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Y(1), Z(1), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Z(1), W(1), Pose2(-1, 1, 0), + poseNoise); + + initial.insert(X(1), Pose2(1.0, 0.0, 0.0)); + initial.insert(Y(1), Pose2(1.0, 1.0, 0.0)); + initial.insert(Z(1), Pose2(1.0, 2.0, 0.0)); + // The leg link did not move so we set the expected pose accordingly. + initial.insert(W(1), Pose2(0.0, 3.0, 0.0)); + + gfg = fg.linearize(initial); + fg = HybridNonlinearFactorGraph(); + + // Update without pruning + // The result is a HybridBayesNet with 1 discrete variable M(1). + // P(X | measurements) = P(W0|Z0, W1, M1) P(Z0|Y0, W1, M1) P(Y0|X0, W1, M1) + // P(X0 | X1, W1, M1) P(W1|Z1, X1, M1) P(Z1|Y1, X1, M1) + // P(Y1 | X1, M1)P(X1 | M1)P(M1) + // The MHS tree is a 1 level tree for time indices (1,) with 2 leaves. + inc.update(gfg); + + /*************** Run Round 3 ***************/ + // Add odometry factor with discrete modes. 
+ contKeys = {W(1), W(2)}; + still = boost::make_shared(W(1), W(2), Pose2(0, 0, 0), + noise_model); + moving = + boost::make_shared(W(1), W(2), odometry, noise_model); + components = {moving, still}; + mixtureFactor = boost::make_shared( + contKeys, DiscreteKeys{gtsam::DiscreteKey(M(2), 2)}, components); + fg.push_back(mixtureFactor); + + // Add equivalent of ImuFactor + fg.emplace_nonlinear>(X(1), X(2), Pose2(1.0, 0.0, 0), + poseNoise); + // PoseFactors-like at k=1 + fg.emplace_nonlinear>(X(2), Y(2), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Y(2), Z(2), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Z(2), W(2), Pose2(-2, 1, 0), + poseNoise); + + initial.insert(X(2), Pose2(2.0, 0.0, 0.0)); + initial.insert(Y(2), Pose2(2.0, 1.0, 0.0)); + initial.insert(Z(2), Pose2(2.0, 2.0, 0.0)); + initial.insert(W(2), Pose2(0.0, 3.0, 0.0)); + + gfg = fg.linearize(initial); + fg = HybridNonlinearFactorGraph(); + + // Now we prune! + // P(X | measurements) = P(W0|Z0, W1, M1) P(Z0|Y0, W1, M1) P(Y0|X0, W1, M1) + // P(X0 | X1, W1, M1) P(W1|W2, Z1, X1, M1, M2) + // P(Z1| W2, Y1, X1, M1, M2) P(Y1 | W2, X1, M1, M2) + // P(X1 | W2, X2, M1, M2) P(W2|Z2, X2, M1, M2) + // P(Z2|Y2, X2, M1, M2) P(Y2 | X2, M1, M2) + // P(X2 | M1, M2) P(M1, M2) + // The MHS at this point should be a 2 level tree on (1, 2). + // 1 has 2 choices, and 2 has 4 choices. + inc.update(gfg); + inc.prune(M(2), 2); + + /*************** Run Round 4 ***************/ + // Add odometry factor with discrete modes. 
+ contKeys = {W(2), W(3)}; + still = boost::make_shared(W(2), W(3), Pose2(0, 0, 0), + noise_model); + moving = + boost::make_shared(W(2), W(3), odometry, noise_model); + components = {moving, still}; + mixtureFactor = boost::make_shared( + contKeys, DiscreteKeys{gtsam::DiscreteKey(M(3), 2)}, components); + fg.push_back(mixtureFactor); + + // Add equivalent of ImuFactor + fg.emplace_nonlinear>(X(2), X(3), Pose2(1.0, 0.0, 0), + poseNoise); + // PoseFactors-like at k=3 + fg.emplace_nonlinear>(X(3), Y(3), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Y(3), Z(3), Pose2(0, 1, 0), + poseNoise); + fg.emplace_nonlinear>(Z(3), W(3), Pose2(-3, 1, 0), + poseNoise); + + initial.insert(X(3), Pose2(3.0, 0.0, 0.0)); + initial.insert(Y(3), Pose2(3.0, 1.0, 0.0)); + initial.insert(Z(3), Pose2(3.0, 2.0, 0.0)); + initial.insert(W(3), Pose2(0.0, 3.0, 0.0)); + + gfg = fg.linearize(initial); + fg = HybridNonlinearFactorGraph(); + + // Keep pruning! + inc.update(gfg); + inc.prune(M(3), 3); + + // The final discrete graph should not be empty since we have eliminated + // all continuous variables. + auto discreteTree = inc[M(3)]->conditional()->asDiscreteConditional(); + EXPECT_LONGS_EQUAL(3, discreteTree->size()); + + // Test if the optimal discrete mode assignment is (1, 1, 1). + DiscreteFactorGraph discreteGraph; + discreteGraph.push_back(discreteTree); + DiscreteValues optimal_assignment = discreteGraph.optimize(); + + DiscreteValues expected_assignment; + expected_assignment[M(1)] = 1; + expected_assignment[M(2)] = 1; + expected_assignment[M(3)] = 1; + + EXPECT(assert_equal(expected_assignment, optimal_assignment)); + + // Test if pruning worked correctly by checking that we only have 3 leaves in + // the last node. 
+ auto lastConditional = inc[X(3)]->conditional()->asMixture(); + EXPECT_LONGS_EQUAL(3, lastConditional->nrComponents()); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp new file mode 100644 index 000000000..c10310b5e --- /dev/null +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -0,0 +1,761 @@ +/* ---------------------------------------------------------------------------- + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * See LICENSE for the license information + * -------------------------------------------------------------------------- */ + +/** + * @file testHybridNonlinearFactorGraph.cpp + * @brief Unit tests for HybridNonlinearFactorGraph + * @author Varun Agrawal + * @author Fan Jiang + * @author Frank Dellaert + * @date December 2021 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "Switching.h" + +// Include for test suite +#include + +using namespace std; +using namespace gtsam; +using noiseModel::Isotropic; +using symbol_shorthand::L; +using symbol_shorthand::M; +using symbol_shorthand::X; + +/* **************************************************************************** + * Test that any linearizedFactorGraph gaussian factors are appended to the + * existing gaussian factor graph in the hybrid factor graph. 
+ */ +TEST(HybridFactorGraph, GaussianFactorGraph) { + HybridNonlinearFactorGraph fg; + + // Add a simple prior factor to the nonlinear factor graph + fg.emplace_nonlinear>(X(0), 0, Isotropic::Sigma(1, 0.1)); + + // Linearization point + Values linearizationPoint; + linearizationPoint.insert(X(0), 0); + + HybridGaussianFactorGraph ghfg = fg.linearize(linearizationPoint); + + // Add a factor to the GaussianFactorGraph + ghfg.add(JacobianFactor(X(0), I_1x1, Vector1(5))); + + EXPECT_LONGS_EQUAL(2, ghfg.size()); +} + +/*************************************************************************** + * Test equality for Hybrid Nonlinear Factor Graph. + */ +TEST(HybridNonlinearFactorGraph, Equals) { + HybridNonlinearFactorGraph graph1; + HybridNonlinearFactorGraph graph2; + + // Test empty factor graphs + EXPECT(assert_equal(graph1, graph2)); + + auto f0 = boost::make_shared>( + 1, Pose2(), noiseModel::Isotropic::Sigma(3, 0.001)); + graph1.push_back(f0); + graph2.push_back(f0); + + auto f1 = boost::make_shared>( + 1, 2, Pose2(), noiseModel::Isotropic::Sigma(3, 0.1)); + graph1.push_back(f1); + graph2.push_back(f1); + + // Test non-empty graphs + EXPECT(assert_equal(graph1, graph2)); +} + +/*************************************************************************** + * Test that the resize method works correctly for a HybridNonlinearFactorGraph. + */ +TEST(HybridNonlinearFactorGraph, Resize) { + HybridNonlinearFactorGraph fg; + auto nonlinearFactor = boost::make_shared>(); + fg.push_back(nonlinearFactor); + + auto discreteFactor = boost::make_shared(); + fg.push_back(discreteFactor); + + auto dcFactor = boost::make_shared(); + fg.push_back(dcFactor); + + EXPECT_LONGS_EQUAL(fg.size(), 3); + + fg.resize(0); + EXPECT_LONGS_EQUAL(fg.size(), 0); +} + +/*************************************************************************** + * Test that the resize method works correctly for a + * HybridGaussianFactorGraph. 
+ */ +TEST(HybridGaussianFactorGraph, Resize) { + HybridNonlinearFactorGraph nhfg; + auto nonlinearFactor = boost::make_shared>( + X(0), X(1), 0.0, Isotropic::Sigma(1, 0.1)); + nhfg.push_back(nonlinearFactor); + auto discreteFactor = boost::make_shared(); + nhfg.push_back(discreteFactor); + + KeyVector contKeys = {X(0), X(1)}; + auto noise_model = noiseModel::Isotropic::Sigma(1, 1.0); + auto still = boost::make_shared(X(0), X(1), 0.0, noise_model), + moving = boost::make_shared(X(0), X(1), 1.0, noise_model); + + std::vector components = {still, moving}; + auto dcFactor = boost::make_shared( + contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); + nhfg.push_back(dcFactor); + + Values linearizationPoint; + linearizationPoint.insert(X(0), 0); + linearizationPoint.insert(X(1), 1); + + // Generate `HybridGaussianFactorGraph` by linearizing + HybridGaussianFactorGraph gfg = nhfg.linearize(linearizationPoint); + + EXPECT_LONGS_EQUAL(gfg.size(), 3); + + gfg.resize(0); + EXPECT_LONGS_EQUAL(gfg.size(), 0); +} + +/***************************************************************************** + * Test push_back on HFG makes the correct distinction. 
+ */ +TEST(HybridFactorGraph, PushBack) { + HybridNonlinearFactorGraph fg; + + auto nonlinearFactor = boost::make_shared>(); + fg.push_back(nonlinearFactor); + + EXPECT_LONGS_EQUAL(fg.size(), 1); + + fg = HybridNonlinearFactorGraph(); + + auto discreteFactor = boost::make_shared(); + fg.push_back(discreteFactor); + + EXPECT_LONGS_EQUAL(fg.size(), 1); + + fg = HybridNonlinearFactorGraph(); + + auto dcFactor = boost::make_shared(); + fg.push_back(dcFactor); + + EXPECT_LONGS_EQUAL(fg.size(), 1); + + // Now do the same with HybridGaussianFactorGraph + HybridGaussianFactorGraph ghfg; + + auto gaussianFactor = boost::make_shared(); + ghfg.push_back(gaussianFactor); + + EXPECT_LONGS_EQUAL(ghfg.size(), 1); + + ghfg = HybridGaussianFactorGraph(); + ghfg.push_back(discreteFactor); + + EXPECT_LONGS_EQUAL(ghfg.size(), 1); + + ghfg = HybridGaussianFactorGraph(); + ghfg.push_back(dcFactor); + + EXPECT_LONGS_EQUAL(ghfg.size(), 1); +} + +/**************************************************************************** + * Test construction of switching-like hybrid factor graph. + */ +TEST(HybridFactorGraph, Switching) { + Switching self(3); + + EXPECT_LONGS_EQUAL(7, self.nonlinearFactorGraph.size()); + EXPECT_LONGS_EQUAL(7, self.linearizedFactorGraph.size()); +} + +/**************************************************************************** + * Test linearization on a switching-like hybrid factor graph. + */ +TEST(HybridFactorGraph, Linearization) { + Switching self(3); + + // Linearize here: + HybridGaussianFactorGraph actualLinearized = + self.nonlinearFactorGraph.linearize(self.linearizationPoint); + + EXPECT_LONGS_EQUAL(7, actualLinearized.size()); +} + +/**************************************************************************** + * Test elimination tree construction + */ +TEST(HybridFactorGraph, EliminationTree) { + Switching self(3); + + // Create ordering. + Ordering ordering; + for (size_t k = 1; k <= self.K; k++) ordering += X(k); + + // Create elimination tree. 
+ HybridEliminationTree etree(self.linearizedFactorGraph, ordering); + EXPECT_LONGS_EQUAL(1, etree.roots().size()) +} + +/**************************************************************************** + *Test elimination function by eliminating x1 in *-x1-*-x2 graph. + */ +TEST(GaussianElimination, Eliminate_x1) { + Switching self(3); + + // Gather factors on x1, has a simple Gaussian and a mixture factor. + HybridGaussianFactorGraph factors; + // Add gaussian prior + factors.push_back(self.linearizedFactorGraph[0]); + // Add first hybrid factor + factors.push_back(self.linearizedFactorGraph[1]); + + // TODO(Varun) remove this block since sum is no longer exposed. + // // Check that sum works: + // auto sum = factors.sum(); + // Assignment mode; + // mode[M(1)] = 1; + // auto actual = sum(mode); // Selects one of 2 modes. + // EXPECT_LONGS_EQUAL(2, actual.size()); // Prior and motion model. + + // Eliminate x1 + Ordering ordering; + ordering += X(1); + + auto result = EliminateHybrid(factors, ordering); + CHECK(result.first); + EXPECT_LONGS_EQUAL(1, result.first->nrFrontals()); + CHECK(result.second); + // Has two keys, x2 and m1 + EXPECT_LONGS_EQUAL(2, result.second->size()); +} + +/**************************************************************************** + * Test elimination function by eliminating x2 in x1-*-x2-*-x3 chain. + * m1/ \m2 + */ +TEST(HybridsGaussianElimination, Eliminate_x2) { + Switching self(3); + + // Gather factors on x2, will be two mixture factors (with x1 and x3, resp.). + HybridGaussianFactorGraph factors; + factors.push_back(self.linearizedFactorGraph[1]); // involves m1 + factors.push_back(self.linearizedFactorGraph[2]); // involves m2 + + // TODO(Varun) remove this block since sum is no longer exposed. + // // Check that sum works: + // auto sum = factors.sum(); + // Assignment mode; + // mode[M(1)] = 0; + // mode[M(2)] = 1; + // auto actual = sum(mode); // Selects one of 4 mode + // combinations. 
EXPECT_LONGS_EQUAL(2, actual.size()); // 2 motion models. + + // Eliminate x2 + Ordering ordering; + ordering += X(2); + + std::pair result = + EliminateHybrid(factors, ordering); + CHECK(result.first); + EXPECT_LONGS_EQUAL(1, result.first->nrFrontals()); + CHECK(result.second); + // Note: separator keys should include m1, m2. + EXPECT_LONGS_EQUAL(4, result.second->size()); +} + +/**************************************************************************** + * Helper method to generate gaussian factor graphs with a specific mode. + */ +GaussianFactorGraph::shared_ptr batchGFG(double between, + Values linearizationPoint) { + NonlinearFactorGraph graph; + graph.addPrior(X(1), 0, Isotropic::Sigma(1, 0.1)); + + auto between_x1_x2 = boost::make_shared( + X(1), X(2), between, Isotropic::Sigma(1, 1.0)); + + graph.push_back(between_x1_x2); + + return graph.linearize(linearizationPoint); +} + +/**************************************************************************** + * Test elimination function by eliminating x1 and x2 in graph. 
+ */ +TEST(HybridGaussianElimination, EliminateHybrid_2_Variable) { + Switching self(2, 1.0, 0.1); + + auto factors = self.linearizedFactorGraph; + + // Eliminate x1 + Ordering ordering; + ordering += X(1); + ordering += X(2); + + HybridConditional::shared_ptr hybridConditionalMixture; + HybridFactor::shared_ptr factorOnModes; + + std::tie(hybridConditionalMixture, factorOnModes) = + EliminateHybrid(factors, ordering); + + auto gaussianConditionalMixture = + dynamic_pointer_cast(hybridConditionalMixture->inner()); + + CHECK(gaussianConditionalMixture); + // Frontals = [x1, x2] + EXPECT_LONGS_EQUAL(2, gaussianConditionalMixture->nrFrontals()); + // 1 parent, which is the mode + EXPECT_LONGS_EQUAL(1, gaussianConditionalMixture->nrParents()); + + // This is now a HybridDiscreteFactor + auto hybridDiscreteFactor = + dynamic_pointer_cast(factorOnModes); + // Access the type-erased inner object and convert to DecisionTreeFactor + auto discreteFactor = + dynamic_pointer_cast(hybridDiscreteFactor->inner()); + CHECK(discreteFactor); + EXPECT_LONGS_EQUAL(1, discreteFactor->discreteKeys().size()); + EXPECT(discreteFactor->root_->isLeaf() == false); +} + +// /* +// ****************************************************************************/ +// /// Test the toDecisionTreeFactor method +// TEST(HybridFactorGraph, ToDecisionTreeFactor) { +// size_t K = 3; + +// // Provide tight sigma values so that the errors are visibly different. 
+// double between_sigma = 5e-8, prior_sigma = 1e-7;
+
+// Switching self(K, between_sigma, prior_sigma);
+
+// // Clear out discrete factors since sum() cannot handle those
+// HybridGaussianFactorGraph linearizedFactorGraph(
+//     self.linearizedFactorGraph.gaussianGraph(), DiscreteFactorGraph(),
+//     self.linearizedFactorGraph.dcGraph());
+
+// auto decisionTreeFactor = linearizedFactorGraph.toDecisionTreeFactor();
+
+// auto allAssignments =
+//     DiscreteValues::CartesianProduct(linearizedFactorGraph.discreteKeys());
+
+// // Get the error of the discrete assignment m1=0, m2=1.
+// double actual = (*decisionTreeFactor)(allAssignments[1]);
+
+// /********************************************/
+// // Create equivalent factor graph for m1=0, m2=1
+// GaussianFactorGraph graph = linearizedFactorGraph.gaussianGraph();
+
+// for (auto &p : linearizedFactorGraph.dcGraph()) {
+//   if (auto mixture =
+//           boost::dynamic_pointer_cast(p)) {
+//     graph.add((*mixture)(allAssignments[1]));
+//   }
+// }
+
+// VectorValues values = graph.optimize();
+// double expected = graph.probPrime(values);
+// /********************************************/
+// EXPECT_DOUBLES_EQUAL(expected, actual, 1e-12);
+// // REGRESSION:
+// EXPECT_DOUBLES_EQUAL(0.6125, actual, 1e-4);
+// }
+
+/****************************************************************************
+ * Test partial elimination
+ */
+TEST(HybridFactorGraph, Partial_Elimination) {
+  Switching self(3);
+
+  auto linearizedFactorGraph = self.linearizedFactorGraph;
+
+  // Create ordering.
+  Ordering ordering;
+  for (size_t k = 1; k <= self.K; k++) ordering += X(k);
+
+  // Eliminate partially.
+ HybridBayesNet::shared_ptr hybridBayesNet; + HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; + std::tie(hybridBayesNet, remainingFactorGraph) = + linearizedFactorGraph.eliminatePartialSequential(ordering); + + CHECK(hybridBayesNet); + // GTSAM_PRINT(*hybridBayesNet); // HybridBayesNet + EXPECT_LONGS_EQUAL(3, hybridBayesNet->size()); + EXPECT(hybridBayesNet->at(0)->frontals() == KeyVector{X(1)}); + EXPECT(hybridBayesNet->at(0)->parents() == KeyVector({X(2), M(1)})); + EXPECT(hybridBayesNet->at(1)->frontals() == KeyVector{X(2)}); + EXPECT(hybridBayesNet->at(1)->parents() == KeyVector({X(3), M(1), M(2)})); + EXPECT(hybridBayesNet->at(2)->frontals() == KeyVector{X(3)}); + EXPECT(hybridBayesNet->at(2)->parents() == KeyVector({M(1), M(2)})); + + CHECK(remainingFactorGraph); + // GTSAM_PRINT(*remainingFactorGraph); // HybridFactorGraph + EXPECT_LONGS_EQUAL(3, remainingFactorGraph->size()); + EXPECT(remainingFactorGraph->at(0)->keys() == KeyVector({M(1)})); + EXPECT(remainingFactorGraph->at(1)->keys() == KeyVector({M(2), M(1)})); + EXPECT(remainingFactorGraph->at(2)->keys() == KeyVector({M(1), M(2)})); +} + +/**************************************************************************** + * Test full elimination + */ +TEST(HybridFactorGraph, Full_Elimination) { + Switching self(3); + + auto linearizedFactorGraph = self.linearizedFactorGraph; + + // We first do a partial elimination + HybridBayesNet::shared_ptr hybridBayesNet_partial; + HybridGaussianFactorGraph::shared_ptr remainingFactorGraph_partial; + DiscreteBayesNet discreteBayesNet; + + { + // Create ordering. + Ordering ordering; + for (size_t k = 1; k <= self.K; k++) ordering += X(k); + + // Eliminate partially. + std::tie(hybridBayesNet_partial, remainingFactorGraph_partial) = + linearizedFactorGraph.eliminatePartialSequential(ordering); + + DiscreteFactorGraph discrete_fg; + // TODO(Varun) Make this a function of HybridGaussianFactorGraph? 
+ for (HybridFactor::shared_ptr& factor : (*remainingFactorGraph_partial)) { + auto df = dynamic_pointer_cast(factor); + discrete_fg.push_back(df->inner()); + } + ordering.clear(); + for (size_t k = 1; k < self.K; k++) ordering += M(k); + discreteBayesNet = + *discrete_fg.eliminateSequential(ordering, EliminateForMPE); + } + + // Create ordering. + Ordering ordering; + for (size_t k = 1; k <= self.K; k++) ordering += X(k); + for (size_t k = 1; k < self.K; k++) ordering += M(k); + + // Eliminate partially. + HybridBayesNet::shared_ptr hybridBayesNet = + linearizedFactorGraph.eliminateSequential(ordering); + + CHECK(hybridBayesNet); + EXPECT_LONGS_EQUAL(5, hybridBayesNet->size()); + // p(x1 | x2, m1) + EXPECT(hybridBayesNet->at(0)->frontals() == KeyVector{X(1)}); + EXPECT(hybridBayesNet->at(0)->parents() == KeyVector({X(2), M(1)})); + // p(x2 | x3, m1, m2) + EXPECT(hybridBayesNet->at(1)->frontals() == KeyVector{X(2)}); + EXPECT(hybridBayesNet->at(1)->parents() == KeyVector({X(3), M(1), M(2)})); + // p(x3 | m1, m2) + EXPECT(hybridBayesNet->at(2)->frontals() == KeyVector{X(3)}); + EXPECT(hybridBayesNet->at(2)->parents() == KeyVector({M(1), M(2)})); + // P(m1 | m2) + EXPECT(hybridBayesNet->at(3)->frontals() == KeyVector{M(1)}); + EXPECT(hybridBayesNet->at(3)->parents() == KeyVector({M(2)})); + EXPECT( + dynamic_pointer_cast(hybridBayesNet->at(3)->inner()) + ->equals(*discreteBayesNet.at(0))); + // P(m2) + EXPECT(hybridBayesNet->at(4)->frontals() == KeyVector{M(2)}); + EXPECT_LONGS_EQUAL(0, hybridBayesNet->at(4)->nrParents()); + EXPECT( + dynamic_pointer_cast(hybridBayesNet->at(4)->inner()) + ->equals(*discreteBayesNet.at(1))); +} + +/**************************************************************************** + * Test printing + */ +TEST(HybridFactorGraph, Printing) { + Switching self(3); + + auto linearizedFactorGraph = self.linearizedFactorGraph; + + // Create ordering. 
+ Ordering ordering; + for (size_t k = 1; k <= self.K; k++) ordering += X(k); + + // Eliminate partially. + HybridBayesNet::shared_ptr hybridBayesNet; + HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; + std::tie(hybridBayesNet, remainingFactorGraph) = + linearizedFactorGraph.eliminatePartialSequential(ordering); + + string expected_hybridFactorGraph = R"( +size: 7 +factor 0: Continuous [x1] + + A[x1] = [ + 10 +] + b = [ -10 ] + No noise model +factor 1: Hybrid [x1 x2; m1]{ + Choice(m1) + 0 Leaf : + A[x1] = [ + -1 +] + A[x2] = [ + 1 +] + b = [ -1 ] + No noise model + + 1 Leaf : + A[x1] = [ + -1 +] + A[x2] = [ + 1 +] + b = [ -0 ] + No noise model + +} +factor 2: Hybrid [x2 x3; m2]{ + Choice(m2) + 0 Leaf : + A[x2] = [ + -1 +] + A[x3] = [ + 1 +] + b = [ -1 ] + No noise model + + 1 Leaf : + A[x2] = [ + -1 +] + A[x3] = [ + 1 +] + b = [ -0 ] + No noise model + +} +factor 3: Continuous [x2] + + A[x2] = [ + 10 +] + b = [ -10 ] + No noise model +factor 4: Continuous [x3] + + A[x3] = [ + 10 +] + b = [ -10 ] + No noise model +factor 5: Discrete [m1] + P( m1 ): + Leaf 0.5 + +factor 6: Discrete [m2 m1] + P( m2 | m1 ): + Choice(m2) + 0 Choice(m1) + 0 0 Leaf 0.33333333 + 0 1 Leaf 0.6 + 1 Choice(m1) + 1 0 Leaf 0.66666667 + 1 1 Leaf 0.4 + +)"; + EXPECT(assert_print_equal(expected_hybridFactorGraph, linearizedFactorGraph)); + + // Expected output for hybridBayesNet. 
+ string expected_hybridBayesNet = R"( +size: 3 +factor 0: Hybrid P( x1 | x2 m1) + Discrete Keys = (m1, 2), + Choice(m1) + 0 Leaf p(x1 | x2) + R = [ 10.0499 ] + S[x2] = [ -0.0995037 ] + d = [ -9.85087 ] + No noise model + + 1 Leaf p(x1 | x2) + R = [ 10.0499 ] + S[x2] = [ -0.0995037 ] + d = [ -9.95037 ] + No noise model + +factor 1: Hybrid P( x2 | x3 m1 m2) + Discrete Keys = (m1, 2), (m2, 2), + Choice(m2) + 0 Choice(m1) + 0 0 Leaf p(x2 | x3) + R = [ 10.099 ] + S[x3] = [ -0.0990196 ] + d = [ -9.99901 ] + No noise model + + 0 1 Leaf p(x2 | x3) + R = [ 10.099 ] + S[x3] = [ -0.0990196 ] + d = [ -9.90098 ] + No noise model + + 1 Choice(m1) + 1 0 Leaf p(x2 | x3) + R = [ 10.099 ] + S[x3] = [ -0.0990196 ] + d = [ -10.098 ] + No noise model + + 1 1 Leaf p(x2 | x3) + R = [ 10.099 ] + S[x3] = [ -0.0990196 ] + d = [ -10 ] + No noise model + +factor 2: Hybrid P( x3 | m1 m2) + Discrete Keys = (m1, 2), (m2, 2), + Choice(m2) + 0 Choice(m1) + 0 0 Leaf p(x3) + R = [ 10.0494 ] + d = [ -10.1489 ] + No noise model + + 0 1 Leaf p(x3) + R = [ 10.0494 ] + d = [ -10.1479 ] + No noise model + + 1 Choice(m1) + 1 0 Leaf p(x3) + R = [ 10.0494 ] + d = [ -10.0504 ] + No noise model + + 1 1 Leaf p(x3) + R = [ 10.0494 ] + d = [ -10.0494 ] + No noise model + +)"; + EXPECT(assert_print_equal(expected_hybridBayesNet, *hybridBayesNet)); +} + +/**************************************************************************** + * Simple PlanarSLAM example test with 2 poses and 2 landmarks (each pose + * connects to 1 landmark) to expose issue with default decision tree creation + * in hybrid elimination. The hybrid factor is between the poses X0 and X1. + * The issue arises if we eliminate a landmark variable first since it is not + * connected to a HybridFactor. + */ +TEST(HybridFactorGraph, DefaultDecisionTree) { + HybridNonlinearFactorGraph fg; + + // Add a prior on pose x1 at the origin. 
+ // A prior factor consists of a mean and a noise model (covariance matrix) + Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin + auto priorNoise = noiseModel::Diagonal::Sigmas( + Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta + fg.emplace_nonlinear>(X(0), prior, priorNoise); + + using PlanarMotionModel = BetweenFactor; + + // Add odometry factor + Pose2 odometry(2.0, 0.0, 0.0); + KeyVector contKeys = {X(0), X(1)}; + auto noise_model = noiseModel::Isotropic::Sigma(3, 1.0); + auto still = boost::make_shared(X(0), X(1), Pose2(0, 0, 0), + noise_model), + moving = boost::make_shared(X(0), X(1), odometry, + noise_model); + std::vector motion_models = {still, moving}; + // TODO(Varun) Make a templated constructor for MixtureFactor which does this? + std::vector components; + for (auto&& f : motion_models) { + components.push_back(boost::dynamic_pointer_cast(f)); + } + fg.emplace_hybrid( + contKeys, DiscreteKeys{gtsam::DiscreteKey(M(1), 2)}, components); + + // Add Range-Bearing measurements to from X0 to L0 and X1 to L1. + // create a noise model for the landmark measurements + auto measurementNoise = noiseModel::Diagonal::Sigmas( + Vector2(0.1, 0.2)); // 0.1 rad std on bearing, 20cm on range + // create the measurement values - indices are (pose id, landmark id) + Rot2 bearing11 = Rot2::fromDegrees(45), bearing22 = Rot2::fromDegrees(90); + double range11 = std::sqrt(4.0 + 4.0), range22 = 2.0; + + // Add Bearing-Range factors + fg.emplace_nonlinear>( + X(0), L(0), bearing11, range11, measurementNoise); + fg.emplace_nonlinear>( + X(1), L(1), bearing22, range22, measurementNoise); + + // Create (deliberately inaccurate) initial estimate + Values initialEstimate; + initialEstimate.insert(X(0), Pose2(0.5, 0.0, 0.2)); + initialEstimate.insert(X(1), Pose2(2.3, 0.1, -0.2)); + initialEstimate.insert(L(0), Point2(1.8, 2.1)); + initialEstimate.insert(L(1), Point2(4.1, 1.8)); + + // We want to eliminate variables not connected to DCFactors first. 
+ Ordering ordering; + ordering += L(0); + ordering += L(1); + ordering += X(0); + ordering += X(1); + + HybridGaussianFactorGraph linearized = fg.linearize(initialEstimate); + gtsam::HybridBayesNet::shared_ptr hybridBayesNet; + gtsam::HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; + + // This should NOT fail + std::tie(hybridBayesNet, remainingFactorGraph) = + linearized.eliminatePartialSequential(ordering); + EXPECT_LONGS_EQUAL(4, hybridBayesNet->size()); + EXPECT_LONGS_EQUAL(1, remainingFactorGraph->size()); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/linear/GaussianISAM.h b/gtsam/linear/GaussianISAM.h index b77b26240..98bf5df88 100644 --- a/gtsam/linear/GaussianISAM.h +++ b/gtsam/linear/GaussianISAM.h @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file SymbolicISAM.h + * @file GaussianISAM.h * @date July 29, 2013 * @author Frank Dellaert * @author Richard Roberts diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index 3fe2cf4d1..8d3a7dd31 100644 --- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -93,9 +93,14 @@ void PreintegratedCombinedMeasurements::resetIntegration() { //------------------------------------------------------------------------------ void PreintegratedCombinedMeasurements::integrateMeasurement( const Vector3& measuredAcc, const Vector3& measuredOmega, double dt) { + if (dt <= 0) { + throw std::runtime_error( + "PreintegratedCombinedMeasurements::integrateMeasurement: dt <=0"); + } + // Update preintegrated measurements. 
-  Matrix9 A;  // overall Jacobian wrt preintegrated measurements (df/dx)
-  Matrix93 B, C;
+  Matrix9 A;  // Jacobian wrt preintegrated measurements without bias (df/dx)
+  Matrix93 B, C;  // Jacobian of state wrt accel bias and omega bias respectively.
   PreintegrationType::update(measuredAcc, measuredOmega, dt, &A, &B, &C);

   // Update preintegrated measurements covariance: as in [2] we consider a first
@@ -105,47 +110,78 @@ void PreintegratedCombinedMeasurements::integrateMeasurement(
   // and preintegrated measurements

   // Single Jacobians to propagate covariance
-  // TODO(frank): should we not also account for bias on position?
-  Matrix3 theta_H_biasOmega = -C.topRows<3>();
-  Matrix3 vel_H_biasAcc = -B.bottomRows<3>();
+  Matrix3 theta_H_biasOmega = C.topRows<3>();
+  Matrix3 pos_H_biasAcc = B.middleRows<3>(3);
+  Matrix3 vel_H_biasAcc = B.bottomRows<3>();
+
+  Matrix3 theta_H_biasOmegaInit = -theta_H_biasOmega;
+  Matrix3 pos_H_biasAccInit = -pos_H_biasAcc;
+  Matrix3 vel_H_biasAccInit = -vel_H_biasAcc;

   // overall Jacobian wrt preintegrated measurements (df/dx)
   Eigen::Matrix F;
   F.setZero();
   F.block<9, 9>(0, 0) = A;
   F.block<3, 3>(0, 12) = theta_H_biasOmega;
+  F.block<3, 3>(3, 9) = pos_H_biasAcc;
   F.block<3, 3>(6, 9) = vel_H_biasAcc;
   F.block<6, 6>(9, 9) = I_6x6;

+  // Update the uncertainty on the state (matrix F in [4]).
+  preintMeasCov_ = F * preintMeasCov_ * F.transpose();
+
   // propagate uncertainty
   // TODO(frank): use noiseModel routine so we can have arbitrary noise models.
const Matrix3& aCov = p().accelerometerCovariance; const Matrix3& wCov = p().gyroscopeCovariance; const Matrix3& iCov = p().integrationCovariance; + const Matrix6& bInitCov = p().biasAccOmegaInt; // first order uncertainty propagation - // Optimized matrix multiplication (1/dt) * G * measurementCovariance * - // G.transpose() + // Optimized matrix mult: (1/dt) * G * measurementCovariance * G.transpose() Eigen::Matrix G_measCov_Gt; G_measCov_Gt.setZero(15, 15); + const Matrix3& bInitCov11 = bInitCov.block<3, 3>(0, 0) / dt; + const Matrix3& bInitCov12 = bInitCov.block<3, 3>(0, 3) / dt; + const Matrix3& bInitCov21 = bInitCov.block<3, 3>(3, 0) / dt; + const Matrix3& bInitCov22 = bInitCov.block<3, 3>(3, 3) / dt; + // BLOCK DIAGONAL TERMS - D_t_t(&G_measCov_Gt) = dt * iCov; - D_v_v(&G_measCov_Gt) = (1 / dt) * vel_H_biasAcc - * (aCov + p().biasAccOmegaInt.block<3, 3>(0, 0)) - * (vel_H_biasAcc.transpose()); - D_R_R(&G_measCov_Gt) = (1 / dt) * theta_H_biasOmega - * (wCov + p().biasAccOmegaInt.block<3, 3>(3, 3)) - * (theta_H_biasOmega.transpose()); + D_R_R(&G_measCov_Gt) = + (theta_H_biasOmega * (wCov / dt) * theta_H_biasOmega.transpose()) // + + + (theta_H_biasOmegaInit * bInitCov22 * theta_H_biasOmegaInit.transpose()); + + D_t_t(&G_measCov_Gt) = + (pos_H_biasAcc * (aCov / dt) * pos_H_biasAcc.transpose()) // + + (pos_H_biasAccInit * bInitCov11 * pos_H_biasAccInit.transpose()) // + + (dt * iCov); + + D_v_v(&G_measCov_Gt) = + (vel_H_biasAcc * (aCov / dt) * vel_H_biasAcc.transpose()) // + + (vel_H_biasAccInit * bInitCov11 * vel_H_biasAccInit.transpose()); + D_a_a(&G_measCov_Gt) = dt * p().biasAccCovariance; D_g_g(&G_measCov_Gt) = dt * p().biasOmegaCovariance; // OFF BLOCK DIAGONAL TERMS - Matrix3 temp = vel_H_biasAcc * p().biasAccOmegaInt.block<3, 3>(3, 0) - * theta_H_biasOmega.transpose(); - D_v_R(&G_measCov_Gt) = temp; - D_R_v(&G_measCov_Gt) = temp.transpose(); - preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G_measCov_Gt; + D_R_t(&G_measCov_Gt) = + 
theta_H_biasOmegaInit * bInitCov21 * pos_H_biasAccInit.transpose(); + D_R_v(&G_measCov_Gt) = + theta_H_biasOmegaInit * bInitCov21 * vel_H_biasAccInit.transpose(); + D_t_R(&G_measCov_Gt) = + pos_H_biasAccInit * bInitCov12 * theta_H_biasOmegaInit.transpose(); + D_t_v(&G_measCov_Gt) = + (pos_H_biasAcc * (aCov / dt) * vel_H_biasAcc.transpose()) + + (pos_H_biasAccInit * bInitCov11 * vel_H_biasAccInit.transpose()); + D_v_R(&G_measCov_Gt) = + vel_H_biasAccInit * bInitCov12 * theta_H_biasOmegaInit.transpose(); + D_v_t(&G_measCov_Gt) = + (vel_H_biasAcc * (aCov / dt) * pos_H_biasAcc.transpose()) + + (vel_H_biasAccInit * bInitCov11 * pos_H_biasAccInit.transpose()); + + preintMeasCov_.noalias() += G_measCov_Gt; } //------------------------------------------------------------------------------ @@ -253,6 +289,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) { os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose(); return os; } -} - /// namespace gtsam +} // namespace gtsam diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index 54c5a7dbb..69d72ad9b 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -51,6 +51,7 @@ typedef ManifoldPreintegration PreintegrationType; * TRO, 28(1):61-76, 2012. * [3] L. Carlone, S. Williams, R. Roberts, "Preintegrated IMU factor: * Computation of the Jacobian Matrices", Tech. Report, 2013. + * Available in this repo as "PreintegratedIMUJacobians.pdf". * [4] C. Forster, L. Carlone, F. Dellaert, D. Scaramuzza, IMU Preintegration on * Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation, * Robotics: Science and Systems (RSS), 2015. 
@@ -61,7 +62,7 @@ typedef ManifoldPreintegration PreintegrationType; struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams { Matrix3 biasAccCovariance; ///< continuous-time "Covariance" describing accelerometer bias random walk Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk - Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration + Matrix6 biasAccOmegaInt; ///< covariance of bias used as initial estimate. /// Default constructor makes uninitialized params struct. /// Used for serialization. @@ -92,11 +93,11 @@ struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams { void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; } void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; } - void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; } + void setBiasAccOmegaInit(const Matrix6& cov) { biasAccOmegaInt=cov; } const Matrix3& getBiasAccCovariance() const { return biasAccCovariance; } const Matrix3& getBiasOmegaCovariance() const { return biasOmegaCovariance; } - const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; } + const Matrix6& getBiasAccOmegaInit() const { return biasAccOmegaInt; } private: diff --git a/gtsam/navigation/ImuBias.h b/gtsam/navigation/ImuBias.h index 9346a4a77..936ae7782 100644 --- a/gtsam/navigation/ImuBias.h +++ b/gtsam/navigation/ImuBias.h @@ -105,7 +105,7 @@ public: /// @{ /** identity for group operation */ - static ConstantBias identity() { + static ConstantBias Identity() { return ConstantBias(); } diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index 28c0461b1..9b6affaaf 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -59,7 +59,7 @@ void PreintegratedImuMeasurements::integrateMeasurement( // Update preintegrated measurements (also get Jacobian) Matrix9 A; // overall Jacobian wrt preintegrated measurements 
(df/dx) - Matrix93 B, C; + Matrix93 B, C; // Jacobian of state wrpt accel bias and omega bias respectively. PreintegrationType::update(measuredAcc, measuredOmega, dt, &A, &B, &C); // first order covariance propagation: @@ -73,11 +73,13 @@ void PreintegratedImuMeasurements::integrateMeasurement( const Matrix3& iCov = p().integrationCovariance; // (1/dt) allows to pass from continuous time noise to discrete time noise + // Update the uncertainty on the state (matrix A in [4]). preintMeasCov_ = A * preintMeasCov_ * A.transpose(); + // These 2 updates account for uncertainty on the IMU measurement (matrix B in [4]). preintMeasCov_.noalias() += B * (aCov / dt) * B.transpose(); preintMeasCov_.noalias() += C * (wCov / dt) * C.transpose(); - // NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt), with Gi << Z_3x3, I_3x3, Z_3x3 + // NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt), with Gi << Z_3x3, I_3x3, Z_3x3 (9x3 matrix) preintMeasCov_.block<3, 3>(3, 3).noalias() += iCov * dt; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index c3b398e22..6765c8b42 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -53,6 +53,7 @@ typedef ManifoldPreintegration PreintegrationType; * TRO, 28(1):61-76, 2012. * [3] L. Carlone, S. Williams, R. Roberts, "Preintegrated IMU factor: * Computation of the Jacobian Matrices", Tech. Report, 2013. + * Available in this repo as "PreintegratedIMUJacobians.pdf". * [4] C. Forster, L. Carlone, F. Dellaert, D. Scaramuzza, "IMU Preintegration on * Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", * Robotics: Science and Systems (RSS), 2015. diff --git a/gtsam/navigation/PreintegrationBase.cpp b/gtsam/navigation/PreintegrationBase.cpp index 8840c34e9..f6e9fccb8 100644 --- a/gtsam/navigation/PreintegrationBase.cpp +++ b/gtsam/navigation/PreintegrationBase.cpp @@ -157,9 +157,9 @@ Vector9 PreintegrationBase::computeError(const NavState& state_i, state_j.localCoordinates(predictedState_j, H2 ? 
      &D_error_state_j : 0, H1 || H3 ? &D_error_predict : 0);
-  if (H1) *H1 << D_error_predict* D_predict_state_i;
+  if (H1) *H1 << D_error_predict * D_predict_state_i;
   if (H2) *H2 << D_error_state_j;
-  if (H3) *H3 << D_error_predict* D_predict_bias_i;
+  if (H3) *H3 << D_error_predict * D_predict_bias_i;

   return error;
 }
diff --git a/gtsam/navigation/ScenarioRunner.cpp b/gtsam/navigation/ScenarioRunner.cpp
index 3938ce86c..9d3e258de 100644
--- a/gtsam/navigation/ScenarioRunner.cpp
+++ b/gtsam/navigation/ScenarioRunner.cpp
@@ -15,8 +15,8 @@
  * @author Frank Dellaert
  */

-#include
 #include
+#include
 #include
 #include
@@ -105,4 +105,62 @@ Matrix6 ScenarioRunner::estimateNoiseCovariance(size_t N) const {
   return Q / (N - 1);
 }

+PreintegratedCombinedMeasurements CombinedScenarioRunner::integrate(
+    double T, const Bias& estimatedBias, bool corrupted) const {
+  gttic_(integrate);
+  PreintegratedCombinedMeasurements pim(p_, estimatedBias);
+
+  const double dt = imuSampleTime();
+  const size_t nrSteps = T / dt;
+  double t = 0;
+  for (size_t k = 0; k < nrSteps; k++, t += dt) {
+    Vector3 measuredOmega =
+        corrupted ? measuredAngularVelocity(t) : actualAngularVelocity(t);
+    Vector3 measuredAcc =
+        corrupted ? measuredSpecificForce(t) : actualSpecificForce(t);
+    pim.integrateMeasurement(measuredAcc, measuredOmega, dt);
+  }
+
+  return pim;
+}
+
+NavState CombinedScenarioRunner::predict(
+    const PreintegratedCombinedMeasurements& pim,
+    const Bias& estimatedBias) const {
+  const NavState state_i(scenario().pose(0), scenario().velocity_n(0));
+  return pim.predict(state_i, estimatedBias);
+}
+
+Eigen::Matrix CombinedScenarioRunner::estimateCovariance(
+    double T, size_t N, const Bias& estimatedBias) const {
+  gttic_(estimateCovariance);
+
+  // Get prediction from ground truth measurements
+  NavState prediction = predict(integrate(T));
+
+  // Sample !
+ Matrix samples(15, N); + Vector15 sum = Vector15::Zero(); + for (size_t i = 0; i < N; i++) { + auto pim = integrate(T, estimatedBias, true); + NavState sampled = predict(pim); + Vector15 xi = Vector15::Zero(); + xi << sampled.localCoordinates(prediction), + (estimatedBias_.vector() - estimatedBias.vector()); + samples.col(i) = xi; + sum += xi; + } + + // Compute MC covariance + Vector15 sampleMean = sum / N; + Eigen::Matrix Q; + Q.setZero(); + for (size_t i = 0; i < N; i++) { + Vector15 xi = samples.col(i) - sampleMean; + Q += xi * xi.transpose(); + } + + return Q / (N - 1); +} + } // namespace gtsam diff --git a/gtsam/navigation/ScenarioRunner.h b/gtsam/navigation/ScenarioRunner.h index 1577e36fe..cee5a54ab 100644 --- a/gtsam/navigation/ScenarioRunner.h +++ b/gtsam/navigation/ScenarioRunner.h @@ -16,9 +16,10 @@ */ #pragma once +#include +#include #include #include -#include namespace gtsam { @@ -66,10 +67,10 @@ class GTSAM_EXPORT ScenarioRunner { // also, uses g=10 for easy debugging const Vector3& gravity_n() const { return p_->n_gravity; } + const Scenario& scenario() const { return scenario_; } + // A gyro simply measures angular velocity in body frame - Vector3 actualAngularVelocity(double t) const { - return scenario_.omega_b(t); - } + Vector3 actualAngularVelocity(double t) const { return scenario_.omega_b(t); } // An accelerometer measures acceleration in body, but not gravity Vector3 actualSpecificForce(double t) const { @@ -106,4 +107,39 @@ class GTSAM_EXPORT ScenarioRunner { Matrix6 estimateNoiseCovariance(size_t N = 1000) const; }; +/* + * Simple class to test navigation scenarios with CombinedImuMeasurements. 
+ * Takes a trajectory scenario as input, and can generate IMU measurements
+ */
+class GTSAM_EXPORT CombinedScenarioRunner : public ScenarioRunner {
+ public:
+  typedef boost::shared_ptr SharedParams;
+
+ private:
+  const SharedParams p_;
+  const Bias estimatedBias_;
+
+ public:
+  CombinedScenarioRunner(const Scenario& scenario, const SharedParams& p,
+                         double imuSampleTime = 1.0 / 100.0,
+                         const Bias& bias = Bias())
+      : ScenarioRunner(scenario, static_cast(p),
+                       imuSampleTime, bias),
+        p_(p),
+        estimatedBias_(bias) {}
+
+  /// Integrate measurements for T seconds into a PIM
+  PreintegratedCombinedMeasurements integrate(
+      double T, const Bias& estimatedBias = Bias(),
+      bool corrupted = false) const;
+
+  /// Predict the resulting NavState given a PIM
+  NavState predict(const PreintegratedCombinedMeasurements& pim,
+                   const Bias& estimatedBias = Bias()) const;
+
+  /// Compute a Monte Carlo estimate of the predict covariance using N samples
+  Eigen::Matrix estimateCovariance(
+      double T, size_t N = 1000, const Bias& estimatedBias = Bias()) const;
+};
+
 } // namespace gtsam
diff --git a/gtsam/navigation/navigation.i b/gtsam/navigation/navigation.i
index 6ede1645f..731cf3807 100644
--- a/gtsam/navigation/navigation.i
+++ b/gtsam/navigation/navigation.i
@@ -17,7 +17,7 @@ class ConstantBias {
   bool equals(const gtsam::imuBias::ConstantBias& expected, double tol) const;

   // Group
-  static gtsam::imuBias::ConstantBias identity();
+  static gtsam::imuBias::ConstantBias Identity();

   // Operator Overloads
   gtsam::imuBias::ConstantBias operator-() const;
@@ -165,11 +165,11 @@ virtual class PreintegrationCombinedParams : gtsam::PreintegrationParams {

   void setBiasAccCovariance(Matrix cov);
   void setBiasOmegaCovariance(Matrix cov);
-  void setBiasAccOmegaInt(Matrix cov);
+  void setBiasAccOmegaInit(Matrix cov);

   Matrix getBiasAccCovariance() const ;
   Matrix getBiasOmegaCovariance() const ;
-  Matrix getBiasAccOmegaInt() const;
+  Matrix getBiasAccOmegaInit() const;
 };

diff --git 
a/gtsam/navigation/tests/testCombinedImuFactor.cpp b/gtsam/navigation/tests/testCombinedImuFactor.cpp index 2bbc2cc7c..aacfff0f0 100644 --- a/gtsam/navigation/tests/testCombinedImuFactor.cpp +++ b/gtsam/navigation/tests/testCombinedImuFactor.cpp @@ -16,18 +16,19 @@ * @author Frank Dellaert * @author Richard Roberts * @author Stephen Williams + * @author Varun Agrawal */ -#include -#include -#include -#include -#include -#include +#include #include #include - -#include +#include +#include +#include +#include +#include +#include +#include #include @@ -40,12 +41,15 @@ static boost::shared_ptr Params() { p->gyroscopeCovariance = kGyroSigma * kGyroSigma * I_3x3; p->accelerometerCovariance = kAccelSigma * kAccelSigma * I_3x3; p->integrationCovariance = 0.0001 * I_3x3; + p->biasAccCovariance = Z_3x3; + p->biasOmegaCovariance = Z_3x3; + p->biasAccOmegaInt = Z_6x6; return p; } -} +} // namespace testing /* ************************************************************************* */ -TEST( CombinedImuFactor, PreintegratedMeasurements ) { +TEST(CombinedImuFactor, PreintegratedMeasurements ) { // Linearization point Bias bias(Vector3(0, 0, 0), Vector3(0, 0, 0)); ///< Current estimate of acceleration and angular rate biases @@ -71,8 +75,9 @@ TEST( CombinedImuFactor, PreintegratedMeasurements ) { DOUBLES_EQUAL(expected1.deltaTij(), actual1.deltaTij(), tol); } + /* ************************************************************************* */ -TEST( CombinedImuFactor, ErrorWithBiases ) { +TEST(CombinedImuFactor, ErrorWithBiases ) { Bias bias(Vector3(0.2, 0, 0), Vector3(0, 0, 0.3)); // Biases (acc, rot) Bias bias2(Vector3(0.2, 0.2, 0), Vector3(1, 0, 0.3)); // Biases (acc, rot) Pose3 x1(Rot3::Expmap(Vector3(0, 0, M_PI / 4.0)), Point3(5.0, 1.0, -50.0)); @@ -203,6 +208,114 @@ TEST(CombinedImuFactor, PredictRotation) { EXPECT(assert_equal(expectedPose, actual.pose(), tol)); } +/* ************************************************************************* */ +// Testing covariance to check 
if all the jacobians are accounted for. +TEST(CombinedImuFactor, CheckCovariance) { + auto params = PreintegrationCombinedParams::MakeSharedU(9.81); + + params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3); + params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3); + params->setIntegrationCovariance(pow(0.0, 2) * I_3x3); + params->setOmegaCoriolis(Vector3::Zero()); + + imuBias::ConstantBias currentBias; + + PreintegratedCombinedMeasurements actual(params, currentBias); + + // Measurements + Vector3 measuredAcc(0.1577, -0.8251, 9.6111); + Vector3 measuredOmega(-0.0210, 0.0311, 0.0145); + double deltaT = 0.01; + + actual.integrateMeasurement(measuredAcc, measuredOmega, deltaT); + + Eigen::Matrix expected; + expected << 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 0, 2.50025e-07, 0, 0, 5.0005e-05, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 0, 5.0005e-05, 0, 0, 0.010001, 0, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, // + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01; + + // regression + EXPECT(assert_equal(expected, actual.preintMeasCov())); +} + +// Test that the covariance values for the ImuFactor and the CombinedImuFactor +// (top-left 9x9) are the same +TEST(CombinedImuFactor, SameCovariance) { + // IMU measurements and time delta + Vector3 accMeas(0.1577, -0.8251, 9.6111); + Vector3 omegaMeas(-0.0210, 0.0311, 0.0145); + double deltaT = 0.01; + + 
// Assume zero bias + imuBias::ConstantBias currentBias; + + // Define params for ImuFactor + auto params = PreintegrationParams::MakeSharedU(); + params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3); + params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3); + params->setIntegrationCovariance(pow(0, 2) * I_3x3); + params->setOmegaCoriolis(Vector3::Zero()); + + // The IMU preintegration object for ImuFactor + PreintegratedImuMeasurements pim(params, currentBias); + pim.integrateMeasurement(accMeas, omegaMeas, deltaT); + + // Define params for CombinedImuFactor + auto combined_params = PreintegrationCombinedParams::MakeSharedU(); + combined_params->setAccelerometerCovariance(pow(0.01, 2) * I_3x3); + combined_params->setGyroscopeCovariance(pow(1.75e-4, 2) * I_3x3); + // Set bias integration covariance explicitly to zero + combined_params->setIntegrationCovariance(Z_3x3); + combined_params->setOmegaCoriolis(Z_3x1); + // Set bias initial covariance explicitly to zero + combined_params->setBiasAccOmegaInit(Z_6x6); + + // The IMU preintegration object for CombinedImuFactor + PreintegratedCombinedMeasurements cpim(combined_params, currentBias); + cpim.integrateMeasurement(accMeas, omegaMeas, deltaT); + + // Assert if the noise covariance + EXPECT(assert_equal(pim.preintMeasCov(), + cpim.preintMeasCov().block(0, 0, 9, 9))); +} + +/* ************************************************************************* */ +TEST(CombinedImuFactor, Accelerating) { + const double a = 0.2, v = 50; + + // Set up body pointing towards y axis, and start at 10,20,0 with velocity + // going in X The body itself has Z axis pointing down + const Rot3 nRb(Point3(0, 1, 0), Point3(1, 0, 0), Point3(0, 0, -1)); + const Point3 initial_position(10, 20, 0); + const Vector3 initial_velocity(v, 0, 0); + + const AcceleratingScenario scenario(nRb, initial_position, initial_velocity, + Vector3(a, 0, 0)); + + const double T = 3.0; // seconds + + CombinedScenarioRunner runner(scenario, testing::Params(), T / 
10); + + PreintegratedCombinedMeasurements pim = runner.integrate(T); + EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); + + auto estimatedCov = runner.estimateCovariance(T, 100); + Eigen::Matrix expected = pim.preintMeasCov(); + EXPECT(assert_equal(estimatedCov, expected, 0.1)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/nonlinear/tests/testExpression.cpp b/gtsam/nonlinear/tests/testExpression.cpp index 92f4998a2..9987cca3f 100644 --- a/gtsam/nonlinear/tests/testExpression.cpp +++ b/gtsam/nonlinear/tests/testExpression.cpp @@ -122,7 +122,7 @@ class Class : public Point3 { enum {dimension = 3}; using Point3::Point3; const Vector3& vector() const { return *this; } - inline static Class identity() { return Class(0,0,0); } + inline static Class Identity() { return Class(0,0,0); } double norm(OptionalJacobian<1,3> H = boost::none) const { return norm3(*this, H); } @@ -285,7 +285,7 @@ TEST(Expression, compose2) { // Test compose with one arguments referring to constant rotation. TEST(Expression, compose3) { // Create expression - Rot3_ R1(Rot3::identity()), R2(3); + Rot3_ R1(Rot3::Identity()), R2(3); Rot3_ R3 = R1 * R2; // Check keys diff --git a/gtsam/slam/tests/testOrientedPlane3Factor.cpp b/gtsam/slam/tests/testOrientedPlane3Factor.cpp index 35c750408..502c62c11 100644 --- a/gtsam/slam/tests/testOrientedPlane3Factor.cpp +++ b/gtsam/slam/tests/testOrientedPlane3Factor.cpp @@ -217,7 +217,7 @@ TEST(OrientedPlane3Factor, Issue561Simplified) { // Setup prior factors // Note: If x0 is too far away from the origin (e.g. x=100) this test can fail. - Pose3 x0(Rot3::identity(), Vector3(10, -1, 1)); + Pose3 x0(Rot3::Identity(), Vector3(10, -1, 1)); auto x0_noise = noiseModel::Isotropic::Sigma(6, 0.01); graph.addPrior(X(0), x0, x0_noise); @@ -241,7 +241,7 @@ TEST(OrientedPlane3Factor, Issue561Simplified) { // Initial values // Just offset the initial pose by 1m. 
This is what we are trying to optimize. Values initialEstimate; - Pose3 x0_initial = x0.compose(Pose3(Rot3::identity(), Vector3(1,0,0))); + Pose3 x0_initial = x0.compose(Pose3(Rot3::Identity(), Vector3(1,0,0))); initialEstimate.insert(P(1), p1); initialEstimate.insert(P(2), p2); initialEstimate.insert(X(0), x0_initial); diff --git a/gtsam/slam/tests/testSmartProjectionRigFactor.cpp b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp index b4876b27e..b142b2009 100644 --- a/gtsam/slam/tests/testSmartProjectionRigFactor.cpp +++ b/gtsam/slam/tests/testSmartProjectionRigFactor.cpp @@ -69,7 +69,7 @@ SmartProjectionParams params( TEST(SmartProjectionRigFactor, Constructor) { using namespace vanillaRig; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartRigFactor::shared_ptr factor1( new SmartRigFactor(model, cameraRig, params)); } @@ -89,7 +89,7 @@ TEST(SmartProjectionRigFactor, Constructor2) { TEST(SmartProjectionRigFactor, Constructor3) { using namespace vanillaRig; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartRigFactor::shared_ptr factor1( new SmartRigFactor(model, cameraRig, params)); factor1->add(measurement1, x1, cameraId1); @@ -99,7 +99,7 @@ TEST(SmartProjectionRigFactor, Constructor3) { TEST(SmartProjectionRigFactor, Constructor4) { using namespace vanillaRig; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartProjectionParams params2( gtsam::HESSIAN, gtsam::ZERO_ON_DEGENERACY); // only config that works with rig factors @@ -112,7 +112,7 @@ TEST(SmartProjectionRigFactor, Constructor4) { TEST(SmartProjectionRigFactor, Equals) { using namespace vanillaRig; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - 
cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartRigFactor::shared_ptr factor1( new SmartRigFactor(model, cameraRig, params)); @@ -140,7 +140,7 @@ TEST(SmartProjectionRigFactor, noiseless) { Point2 level_uv_right = level_camera_right.project(landmark1); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartRigFactor factor(model, cameraRig, params); factor.add(level_uv, x1); // both taken from the same camera @@ -197,7 +197,7 @@ TEST(SmartProjectionRigFactor, noisy) { using namespace vanillaRig; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); // Project two landmarks into two cameras Point2 pixelError(0.2, 0.2); @@ -323,7 +323,7 @@ TEST(SmartProjectionRigFactor, smartFactorWithMultipleCameras) { Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), Point3(1, 1, 1)); Pose3 body_T_sensor2 = Pose3(Rot3::Ypr(-M_PI / 5, 0., -M_PI / 2), Point3(0, 0, 1)); - Pose3 body_T_sensor3 = Pose3::identity(); + Pose3 body_T_sensor3 = Pose3(); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig cameraRig->push_back(Camera(body_T_sensor1, sharedK)); @@ -407,7 +407,7 @@ TEST(SmartProjectionRigFactor, 3poses_smart_projection_factor) { Point2Vector measurements_cam1, measurements_cam2, measurements_cam3; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK2)); + cameraRig->push_back(Camera(Pose3(), sharedK2)); // Project three landmarks into three cameras projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1); @@ -495,7 +495,7 @@ TEST(SmartProjectionRigFactor, Factors) { FastVector cameraIds{0, 0}; boost::shared_ptr cameraRig(new Cameras()); // single camera in 
the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); SmartRigFactor::shared_ptr smartFactor1 = boost::make_shared( model, cameraRig, params); @@ -578,7 +578,7 @@ TEST(SmartProjectionRigFactor, 3poses_iterative_smart_projection_factor) { // create smart factor boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( new SmartRigFactor(model, cameraRig, params)); @@ -646,7 +646,7 @@ TEST(SmartProjectionRigFactor, landmarkDistance) { params.setEnableEPI(false); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( @@ -717,7 +717,7 @@ TEST(SmartProjectionRigFactor, dynamicOutlierRejection) { params.setDynamicOutlierRejectionThreshold(dynamicOutlierRejectionThreshold); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( @@ -783,7 +783,7 @@ TEST(SmartProjectionRigFactor, CheckHessian) { params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( @@ -859,7 +859,7 @@ TEST(SmartProjectionRigFactor, Hessian) { measurements_cam1.push_back(cam2_uv1); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK2)); + 
cameraRig->push_back(Camera(Pose3(), sharedK2)); FastVector cameraIds{0, 0}; SmartProjectionParams params( @@ -889,7 +889,7 @@ TEST(SmartProjectionRigFactor, Hessian) { TEST(SmartProjectionRigFactor, ConstructorWithCal3Bundler) { using namespace bundlerPose; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedBundlerK)); + cameraRig->push_back(Camera(Pose3(), sharedBundlerK)); SmartProjectionParams params; params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY); @@ -917,7 +917,7 @@ TEST(SmartProjectionRigFactor, Cal3Bundler) { KeyVector views{x1, x2, x3}; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedBundlerK)); + cameraRig->push_back(Camera(Pose3(), sharedBundlerK)); FastVector cameraIds{0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( @@ -988,7 +988,7 @@ TEST(SmartProjectionRigFactor, KeyVector keys{x1, x2, x3, x1}; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0, 0}; SmartRigFactor::shared_ptr smartFactor1( @@ -1116,7 +1116,7 @@ TEST(SmartProjectionRigFactor, optimization_3poses_measurementsFromSamePose) { // create inputs KeyVector keys{x1, x2, x3}; boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3(), sharedK)); FastVector cameraIds{0, 0, 0}; FastVector cameraIdsRedundant{0, 0, 0, 0}; @@ -1195,11 +1195,11 @@ TEST(SmartProjectionRigFactor, timing) { // Default cameras for simple derivatives static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); - Rot3 R = Rot3::identity(); + Rot3 R = Rot3::Identity(); Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); Camera cam1(pose1, sharedKSimple), 
cam2(pose2, sharedKSimple); - Pose3 body_P_sensorId = Pose3::identity(); + Pose3 body_P_sensorId = Pose3(); boost::shared_ptr cameraRig(new Cameras()); // single camera in the rig cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple)); @@ -1267,7 +1267,7 @@ TEST(SmartProjectionFactorP, optimization_3poses_sphericalCamera) { keys.push_back(x3); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), emptyK)); + cameraRig->push_back(Camera(Pose3(), emptyK)); SmartProjectionParams params( gtsam::HESSIAN, @@ -1330,10 +1330,10 @@ TEST(SmartProjectionFactorP, optimization_3poses_sphericalCamera) { /* *************************************************************************/ TEST(SmartProjectionFactorP, timing_sphericalCamera) { // create common data - Rot3 R = Rot3::identity(); + Rot3 R = Rot3::Identity(); Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); - Pose3 body_P_sensorId = Pose3::identity(); + Pose3 body_P_sensorId = Pose3(); Point3 landmark1(0, 0, 10); // create spherical data @@ -1423,7 +1423,7 @@ TEST(SmartProjectionFactorP, 2poses_rankTol) { boost::shared_ptr>> cameraRig( new CameraSet>()); // single camera in the rig - cameraRig->push_back(PinholePose(Pose3::identity(), sharedK)); + cameraRig->push_back(PinholePose(Pose3(), sharedK)); SmartRigFactor::shared_ptr smartFactor1( new SmartRigFactor(model, cameraRig, params)); @@ -1461,7 +1461,7 @@ TEST(SmartProjectionFactorP, 2poses_rankTol) { boost::shared_ptr>> cameraRig( new CameraSet>()); // single camera in the rig - cameraRig->push_back(PinholePose(Pose3::identity(), canonicalK)); + cameraRig->push_back(PinholePose(Pose3(), canonicalK)); SmartRigFactor::shared_ptr smartFactor1( new SmartRigFactor(model, cameraRig, params)); @@ -1498,7 +1498,7 @@ TEST(SmartProjectionFactorP, 2poses_rankTol) { boost::shared_ptr>> cameraRig( new CameraSet>()); // single camera in the rig - cameraRig->push_back(PinholePose(Pose3::identity(), 
canonicalK)); + cameraRig->push_back(PinholePose(Pose3(), canonicalK)); SmartRigFactor::shared_ptr smartFactor1( new SmartRigFactor(model, cameraRig, params)); @@ -1537,7 +1537,7 @@ TEST(SmartProjectionFactorP, 2poses_sphericalCamera_rankTol) { boost::shared_ptr> cameraRig( new CameraSet()); // single camera in the rig - cameraRig->push_back(SphericalCamera(Pose3::identity(), emptyK)); + cameraRig->push_back(SphericalCamera(Pose3(), emptyK)); // TRIANGULATION TEST WITH DEFAULT RANK TOL { // rankTol = 1 or 0.1 gives a degenerate point, which is undesirable for a diff --git a/gtsam_unstable/dynamics/tests/testVelocityConstraint.cpp b/gtsam_unstable/dynamics/tests/testVelocityConstraint.cpp index 6d8206177..b60b2f3ae 100644 --- a/gtsam_unstable/dynamics/tests/testVelocityConstraint.cpp +++ b/gtsam_unstable/dynamics/tests/testVelocityConstraint.cpp @@ -15,9 +15,9 @@ const Key x1 = 1, x2 = 2; const double dt = 1.0; PoseRTV origin, - pose1(Point3(0.5, 0.0, 0.0), Rot3::identity(), Velocity3(1.0, 0.0, 0.0)), + pose1(Point3(0.5, 0.0, 0.0), Rot3(), Velocity3(1.0, 0.0, 0.0)), pose1a(Point3(0.5, 0.0, 0.0)), - pose2(Point3(1.5, 0.0, 0.0), Rot3::identity(), Velocity3(1.0, 0.0, 0.0)); + pose2(Point3(1.5, 0.0, 0.0), Rot3(), Velocity3(1.0, 0.0, 0.0)); /* ************************************************************************* */ TEST( testVelocityConstraint, trapezoidal ) { diff --git a/gtsam_unstable/examples/GncPoseAveragingExample.cpp b/gtsam_unstable/examples/GncPoseAveragingExample.cpp index ad96934c8..730d342f0 100644 --- a/gtsam_unstable/examples/GncPoseAveragingExample.cpp +++ b/gtsam_unstable/examples/GncPoseAveragingExample.cpp @@ -53,7 +53,7 @@ int main(int argc, char** argv){ normal_distribution normalInliers(0.0, 0.05); Values initial; - initial.insert(0, Pose3::identity()); // identity pose as initialization + initial.insert(0, Pose3()); // identity pose as initialization // create ground truth pose Vector6 poseGtVector; diff --git 
a/gtsam_unstable/examples/ISAM2_SmartFactorStereo_IMU.cpp b/gtsam_unstable/examples/ISAM2_SmartFactorStereo_IMU.cpp index c0a102d11..16eda23d4 100644 --- a/gtsam_unstable/examples/ISAM2_SmartFactorStereo_IMU.cpp +++ b/gtsam_unstable/examples/ISAM2_SmartFactorStereo_IMU.cpp @@ -129,8 +129,8 @@ int main(int argc, char* argv[]) { // Pose prior - at identity auto priorPoseNoise = noiseModel::Diagonal::Sigmas( (Vector(6) << Vector3::Constant(0.1), Vector3::Constant(0.1)).finished()); - graph.addPrior(X(1), Pose3::identity(), priorPoseNoise); - initialEstimate.insert(X(0), Pose3::identity()); + graph.addPrior(X(1), Pose3::Identity(), priorPoseNoise); + initialEstimate.insert(X(0), Pose3::Identity()); // Bias prior graph.addPrior(B(1), imu.priorImuBias, @@ -157,7 +157,7 @@ int main(int argc, char* argv[]) { if (frame != lastFrame || in.eof()) { cout << "Running iSAM for frame: " << lastFrame << "\n"; - initialEstimate.insert(X(lastFrame), Pose3::identity()); + initialEstimate.insert(X(lastFrame), Pose3::Identity()); initialEstimate.insert(V(lastFrame), Vector3(0, 0, 0)); initialEstimate.insert(B(lastFrame), imu.prevBias); diff --git a/gtsam_unstable/geometry/Pose3Upright.h b/gtsam_unstable/geometry/Pose3Upright.h index d833c9cf4..037a6cc9d 100644 --- a/gtsam_unstable/geometry/Pose3Upright.h +++ b/gtsam_unstable/geometry/Pose3Upright.h @@ -95,7 +95,7 @@ public: /// @{ /// identity for group operation - static Pose3Upright identity() { return Pose3Upright(); } + static Pose3Upright Identity() { return Pose3Upright(); } /// inverse transformation with derivatives Pose3Upright inverse(boost::optional H1=boost::none) const; diff --git a/gtsam_unstable/gtsam_unstable.i b/gtsam_unstable/gtsam_unstable.i index 08cd45e18..c6dbd4ab6 100644 --- a/gtsam_unstable/gtsam_unstable.i +++ b/gtsam_unstable/gtsam_unstable.i @@ -130,7 +130,7 @@ class Pose3Upright { gtsam::Pose3Upright retract(Vector v) const; Vector localCoordinates(const gtsam::Pose3Upright& p2) const; - static 
gtsam::Pose3Upright identity(); + static gtsam::Pose3Upright Identity(); gtsam::Pose3Upright inverse() const; gtsam::Pose3Upright compose(const gtsam::Pose3Upright& p2) const; gtsam::Pose3Upright between(const gtsam::Pose3Upright& p2) const; diff --git a/gtsam_unstable/slam/tests/testLocalOrientedPlane3Factor.cpp b/gtsam_unstable/slam/tests/testLocalOrientedPlane3Factor.cpp index b97d56c7e..c2b526ecd 100644 --- a/gtsam_unstable/slam/tests/testLocalOrientedPlane3Factor.cpp +++ b/gtsam_unstable/slam/tests/testLocalOrientedPlane3Factor.cpp @@ -44,8 +44,8 @@ TEST(LocalOrientedPlane3Factor, lm_translation_error) { // Init pose and prior. Pose Prior is needed since a single plane measurement // does not fully constrain the pose - Pose3 init_pose = Pose3::identity(); - Pose3 anchor_pose = Pose3::identity(); + Pose3 init_pose = Pose3::Identity(); + Pose3 anchor_pose = Pose3::Identity(); graph.addPrior(X(0), init_pose, noiseModel::Isotropic::Sigma(6, 0.001)); graph.addPrior(X(1), anchor_pose, noiseModel::Isotropic::Sigma(6, 0.001)); @@ -89,7 +89,7 @@ TEST (LocalOrientedPlane3Factor, lm_rotation_error) { // Init pose and prior. 
Pose Prior is needed since a single plane measurement // does not fully constrain the pose - Pose3 init_pose = Pose3::identity(); + Pose3 init_pose = Pose3::Identity(); graph.addPrior(X(0), init_pose, noiseModel::Isotropic::Sigma(6, 0.001)); // Add two landmark measurements, differing in angle @@ -180,8 +180,8 @@ TEST(LocalOrientedPlane3Factor, Issue561Simplified) { NonlinearFactorGraph graph; // Setup prior factors - Pose3 x0(Rot3::identity(), Vector3(100, 30, 10)); // the "sensor pose" - Pose3 x1(Rot3::identity(), Vector3(90, 40, 5) ); // the "anchor pose" + Pose3 x0(Rot3::Identity(), Vector3(100, 30, 10)); // the "sensor pose" + Pose3 x1(Rot3::Identity(), Vector3(90, 40, 5) ); // the "anchor pose" auto x0_noise = noiseModel::Isotropic::Sigma(6, 0.01); auto x1_noise = noiseModel::Isotropic::Sigma(6, 0.01); @@ -213,7 +213,7 @@ TEST(LocalOrientedPlane3Factor, Issue561Simplified) { // Initial values // Just offset the initial pose by 1m. This is what we are trying to optimize. Values initialEstimate; - Pose3 x0_initial = x0.compose(Pose3(Rot3::identity(), Vector3(1, 0, 0))); + Pose3 x0_initial = x0.compose(Pose3(Rot3::Identity(), Vector3(1, 0, 0))); initialEstimate.insert(P(1), p1_in_x1); initialEstimate.insert(P(2), p2_in_x1); initialEstimate.insert(X(0), x0_initial); diff --git a/gtsam_unstable/slam/tests/testPartialPriorFactor.cpp b/gtsam_unstable/slam/tests/testPartialPriorFactor.cpp index 7ba8a0d04..615ed13aa 100644 --- a/gtsam_unstable/slam/tests/testPartialPriorFactor.cpp +++ b/gtsam_unstable/slam/tests/testPartialPriorFactor.cpp @@ -173,7 +173,7 @@ TEST(PartialPriorFactor, Constructors3) { /* ************************************************************************* */ TEST(PartialPriorFactor, JacobianAtIdentity3) { Key poseKey(1); - Pose3 measurement = Pose3::identity(); + Pose3 measurement = Pose3::Identity(); SharedNoiseModel model = NM::Isotropic::Sigma(1, 0.25); TestPartialPriorFactor3 factor(poseKey, kIndexTy, measurement.translation().y(), model); diff 
--git a/gtsam_unstable/slam/tests/testPoseToPointFactor.cpp b/gtsam_unstable/slam/tests/testPoseToPointFactor.cpp index 5aaaaec53..7345e2308 100644 --- a/gtsam_unstable/slam/tests/testPoseToPointFactor.cpp +++ b/gtsam_unstable/slam/tests/testPoseToPointFactor.cpp @@ -16,7 +16,7 @@ using namespace gtsam::noiseModel; /* ************************************************************************* */ // Verify zero error when there is no noise TEST(PoseToPointFactor, errorNoiseless_2D) { - Pose2 pose = Pose2::identity(); + Pose2 pose = Pose2::Identity(); Point2 point(1.0, 2.0); Point2 noise(0.0, 0.0); Point2 measured = point + noise; @@ -33,7 +33,7 @@ TEST(PoseToPointFactor, errorNoiseless_2D) { /* ************************************************************************* */ // Verify expected error in test scenario TEST(PoseToPointFactor, errorNoise_2D) { - Pose2 pose = Pose2::identity(); + Pose2 pose = Pose2::Identity(); Point2 point(1.0, 2.0); Point2 noise(-1.0, 0.5); Point2 measured = point + noise; @@ -86,7 +86,7 @@ TEST(PoseToPointFactor, jacobian_2D) { /* ************************************************************************* */ // Verify zero error when there is no noise TEST(PoseToPointFactor, errorNoiseless_3D) { - Pose3 pose = Pose3::identity(); + Pose3 pose = Pose3::Identity(); Point3 point(1.0, 2.0, 3.0); Point3 noise(0.0, 0.0, 0.0); Point3 measured = point + noise; @@ -103,7 +103,7 @@ TEST(PoseToPointFactor, errorNoiseless_3D) { /* ************************************************************************* */ // Verify expected error in test scenario TEST(PoseToPointFactor, errorNoise_3D) { - Pose3 pose = Pose3::identity(); + Pose3 pose = Pose3::Identity(); Point3 point(1.0, 2.0, 3.0); Point3 noise(-1.0, 0.5, 0.3); Point3 measured = point + noise; diff --git a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp index b5962d777..b88a27c6c 100644 --- 
a/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp +++ b/gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp @@ -88,7 +88,7 @@ typedef SmartProjectionPoseFactorRollingShutter> TEST(SmartProjectionPoseFactorRollingShutter, Constructor) { using namespace vanillaPoseRS; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr factor1( new SmartFactorRS(model, cameraRig, params)); } @@ -97,7 +97,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, Constructor) { TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) { using namespace vanillaPoseRS; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); params.setRankTolerance(rankTol); SmartFactorRS factor1(model, cameraRig, params); } @@ -106,7 +106,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) { TEST(SmartProjectionPoseFactorRollingShutter, add) { using namespace vanillaPoseRS; boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr factor1( new SmartFactorRS(model, cameraRig, params)); factor1->add(measurement1, x1, x2, interp_factor); @@ -230,7 +230,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, noiselessErrorAndJacobians) { // Project two landmarks into two cameras Point2 level_uv = cam1.project(landmark1); Point2 level_uv_right = cam2.project(landmark1); - Pose3 body_P_sensorId = Pose3::identity(); + Pose3 body_P_sensorId = Pose3::Identity(); boost::shared_ptr cameraRig(new Cameras()); cameraRig->push_back(Camera(body_P_sensorId, sharedK)); @@ -405,7 +405,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) { interp_factors.push_back(interp_factor3); 
boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -480,7 +480,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam) { boost::shared_ptr cameraRig(new Cameras()); cameraRig->push_back(Camera(body_P_sensor, sharedK)); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -633,11 +633,11 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) { // Default cameras for simple derivatives static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0)); - Rot3 R = Rot3::identity(); + Rot3 R = Rot3::Identity(); Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); Camera cam1(pose1, sharedKSimple), cam2(pose2, sharedKSimple); - Pose3 body_P_sensorId = Pose3::identity(); + Pose3 body_P_sensorId = Pose3::Identity(); // one landmarks 1m in front of camera Point3 landmark1(0, 0, 10); @@ -747,7 +747,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_EPI) { params.setEnableEPI(true); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS smartFactor1(model, cameraRig, params); smartFactor1.add(measurements_lmk1, key_pairs, interp_factors); @@ -816,7 +816,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, params.setEnableEPI(false); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS smartFactor1(model, cameraRig, params); smartFactor1.add(measurements_lmk1, key_pairs, interp_factors); 
@@ -894,7 +894,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, params.setEnableEPI(false); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -961,7 +961,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors.push_back(interp_factor3); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -1102,7 +1102,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors.push_back(interp_factor1); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -1261,7 +1261,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, interp_factors.at(0)); // we readd the first interp factor boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), sharedK)); + cameraRig->push_back(Camera(Pose3::Identity(), sharedK)); SmartFactorRS::shared_ptr smartFactor1( new SmartFactorRS(model, cameraRig, params)); @@ -1331,11 +1331,11 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) { gtsam::HESSIAN, gtsam::ZERO_ON_DEGENERACY); // only config that works with RS factors - Rot3 R = Rot3::identity(); + Rot3 R = Rot3::Identity(); Pose3 pose1 = Pose3(R, Point3(0, 0, 0)); Pose3 pose2 = Pose3(R, Point3(1, 0, 0)); Camera cam1(pose1, sharedKSimple), cam2(pose2, sharedKSimple); - Pose3 body_P_sensorId = Pose3::identity(); + Pose3 body_P_sensorId = Pose3::Identity(); // one landmarks 1m in front of camera Point3 landmark1(0, 0, 10); @@ -1431,7 +1431,7 @@ 
TEST(SmartProjectionPoseFactorRollingShutter, params.setRankTolerance(0.1); boost::shared_ptr cameraRig(new Cameras()); - cameraRig->push_back(Camera(Pose3::identity(), emptyK)); + cameraRig->push_back(Camera(Pose3::Identity(), emptyK)); SmartFactorRS_spherical::shared_ptr smartFactor1( new SmartFactorRS_spherical(model, cameraRig, params)); diff --git a/gtsam_unstable/slam/tests/testSmartStereoFactor_iSAM2.cpp b/gtsam_unstable/slam/tests/testSmartStereoFactor_iSAM2.cpp index 107defb4c..98b18371f 100644 --- a/gtsam_unstable/slam/tests/testSmartStereoFactor_iSAM2.cpp +++ b/gtsam_unstable/slam/tests/testSmartStereoFactor_iSAM2.cpp @@ -198,10 +198,10 @@ TEST(testISAM2SmartFactor, Stereo_Batch) { // prior, for the first keyframe: if (kf_id == 0) { - batch_graph.addPrior(X(kf_id), Pose3::identity(), priorPoseNoise); + batch_graph.addPrior(X(kf_id), Pose3::Identity(), priorPoseNoise); } - batch_values.insert(X(kf_id), Pose3::identity()); + batch_values.insert(X(kf_id), Pose3::Identity()); } LevenbergMarquardtParams parameters; @@ -267,7 +267,7 @@ TEST(testISAM2SmartFactor, Stereo_iSAM2) { // Storage of smart factors: std::map smartFactors; - Pose3 lastKeyframePose = Pose3::identity(); + Pose3 lastKeyframePose = Pose3::Identity(); // Run one timestep at once: for (const auto &entries : dataset) { @@ -307,7 +307,7 @@ TEST(testISAM2SmartFactor, Stereo_iSAM2) { // prior, for the first keyframe: if (kf_id == 0) { - newFactors.addPrior(X(kf_id), Pose3::identity(), priorPoseNoise); + newFactors.addPrior(X(kf_id), Pose3::Identity(), priorPoseNoise); } // 2) Run iSAM2: diff --git a/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp b/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp index c71c19038..1dbd25666 100644 --- a/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp +++ b/gtsam_unstable/slam/tests/testSmartStereoProjectionFactorPP.cpp @@ -181,8 +181,8 @@ TEST_UNSAFE( SmartStereoProjectionFactorPP, noiseless_error_identityExtrinsics ) 
Values values; values.insert(x1, w_Pose_cam1); values.insert(x2, w_Pose_cam2); - values.insert(body_P_cam1_key, Pose3::identity()); - values.insert(body_P_cam2_key, Pose3::identity()); + values.insert(body_P_cam1_key, Pose3::Identity()); + values.insert(body_P_cam2_key, Pose3::Identity()); SmartStereoProjectionFactorPP factor1(model); factor1.add(cam1_uv, x1, body_P_cam1_key, K2); @@ -426,7 +426,7 @@ TEST( SmartStereoProjectionFactorPP, 3poses_optimization_multipleExtrinsics ) { // Values Pose3 body_Pose_cam1 = Pose3(Rot3::Ypr(-M_PI, 1., 0.1),Point3(0, 1, 0)); Pose3 body_Pose_cam2 = Pose3(Rot3::Ypr(-M_PI / 4, 0.1, 1.0),Point3(1, 1, 1)); - Pose3 body_Pose_cam3 = Pose3::identity(); + Pose3 body_Pose_cam3 = Pose3::Identity(); Pose3 w_Pose_body1 = w_Pose_cam1.compose(body_Pose_cam1.inverse()); Pose3 w_Pose_body2 = w_Pose_cam2.compose(body_Pose_cam2.inverse()); Pose3 w_Pose_body3 = w_Pose_cam3.compose(body_Pose_cam3.inverse()); @@ -1147,7 +1147,7 @@ TEST( SmartStereoProjectionFactorPP, landmarkDistance ) { graph.push_back(smartFactor3); graph.addPrior(x1, pose1, noisePrior); graph.addPrior(x2, pose2, noisePrior); - graph.addPrior(body_P_cam_key, Pose3::identity(), noisePrior); + graph.addPrior(body_P_cam_key, Pose3::Identity(), noisePrior); // Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), Point3(0.5,0.1,0.3)); // noise from regular projection factor test below Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100), @@ -1156,7 +1156,7 @@ TEST( SmartStereoProjectionFactorPP, landmarkDistance ) { values.insert(x1, pose1); values.insert(x2, pose2); values.insert(x3, pose3 * noise_pose); - values.insert(body_P_cam_key, Pose3::identity()); + values.insert(body_P_cam_key, Pose3::Identity()); // All smart factors are disabled and pose should remain where it is Values result; @@ -1245,7 +1245,7 @@ TEST( SmartStereoProjectionFactorPP, dynamicOutlierRejection ) { values.insert(x1, pose1); values.insert(x2, pose2); values.insert(x3, pose3); - 
values.insert(body_P_cam_key, Pose3::identity()); + values.insert(body_P_cam_key, Pose3::Identity()); EXPECT_DOUBLES_EQUAL(0, smartFactor1->error(values), 1e-9); EXPECT_DOUBLES_EQUAL(0, smartFactor2->error(values), 1e-9); @@ -1267,7 +1267,7 @@ TEST( SmartStereoProjectionFactorPP, dynamicOutlierRejection ) { Values result; LevenbergMarquardtOptimizer optimizer(graph, values, lm_params); result = optimizer.optimize(); - EXPECT(assert_equal(Pose3::identity(), result.at(body_P_cam_key))); + EXPECT(assert_equal(Pose3::Identity(), result.at(body_P_cam_key))); } /* ************************************************************************* */ diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index 6d23144aa..5b27eea4d 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -382,7 +382,7 @@ TEST(ExpressionFactor, compose2) { TEST(ExpressionFactor, compose3) { // Create expression - Rot3_ R1(Rot3::identity()), R2(3); + Rot3_ R1(Rot3::Identity()), R2(3); Rot3_ R3 = R1 * R2; // Create factor diff --git a/tests/testManifold.cpp b/tests/testManifold.cpp index ac3a09e36..a05c4b621 100644 --- a/tests/testManifold.cpp +++ b/tests/testManifold.cpp @@ -139,7 +139,7 @@ TEST(Manifold, DefaultChart) { Vector v3(3); v3 << 1, 1, 1; - Rot3 I = Rot3::identity(); + Rot3 I = Rot3::Identity(); Rot3 R = I.retract(v3); //DefaultChart chart5; EXPECT(assert_equal(v3, traits::Local(I, R))); diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp index 207d54a4d..60dd6f47a 100644 --- a/timing/timeShonanFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -62,9 +62,9 @@ int main(int argc, char* argv[]) { // Build graph NonlinearFactorGraph graph; - // graph.add(NonlinearEquality(0, SOn::identity(4))); + // graph.add(NonlinearEquality(0, SOn::Identity(4))); auto priorModel = noiseModel::Isotropic::Sigma(6, 10000); - graph.add(PriorFactor(0, SOn::identity(4), priorModel)); + graph.add(PriorFactor(0, SOn::Identity(4), priorModel)); auto G = 
boost::make_shared(SOn::VectorizedGenerators(4)); for (const auto &m : measurements) { const auto &keys = m.keys(); @@ -92,7 +92,7 @@ int main(int argc, char* argv[]) { for (size_t i = 0; i < 100; i++) { gttic_(optimize); Values initial; - initial.insert(0, SOn::identity(4)); + initial.insert(0, SOn::Identity(4)); for (size_t j = 1; j < poses.size(); j++) { initial.insert(j, SOn::Random(rng, 4)); }