Merge branch 'develop' into feature/cameraTemplateForAllSmartFactors
commit 459c8f93a5
@@ -113,9 +113,9 @@ function test ()

# Actual testing
if [ "$(uname)" == "Linux" ]; then
make -j$(nproc)
make -j$(nproc) check
elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu)
make -j$(sysctl -n hw.physicalcpu) check
fi

finish
@@ -105,6 +105,11 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)

if (GTSAM_BUILD_UNSTABLE)
GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake)
endif()

# Check for doxygen availability - optional dependency
find_package(Doxygen)
@@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
# here.
if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING})
endif()

# Version file
@@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
@@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB)
if(TBB_FOUND)
set(GTSAM_USE_TBB 1)  # This will go into config.h

if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
endif()
# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
#   message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
# endif()

if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
388  doc/math.lyx
@@ -5082,6 +5082,394 @@ reference "ex:projection"

Subsection: Derivative of Adjoint  (label: subsec:pose3_adjoint_deriv)

Consider $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$ defined as $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$.
The derivative is notated (see Section \ref{sec:Derivatives-of-Actions}):
\[
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
\]
First, computing $D_{2}f_{(T,\xi_{b})}(\xi_{b})$ is easy, as its matrix is simply $Ad_{T}$:
\[
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
\]
\[
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
\]
We will derive $D_{1}f_{(T,\xi_{b})}(\xi)$ using two approaches.
In the first, we'll define $g(T,\xi)\triangleq T\exp\hat{\xi}$.
From Section \ref{sec:Derivatives-of-Actions},
\begin{align*}
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
\end{align*}
Now we can use the definition of the Adjoint representation $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$ (aka conjugation by $g$), then apply the product rule and simplify:
\begin{align*}
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
 & =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
 & =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
 & =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
 & =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
 & =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
D_{1}f_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
\end{align*}
where $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$ is the adjoint map of the Lie algebra.

The second, perhaps more intuitive way of deriving $D_{1}f_{(T,\xi_{b})}(\xi_{b})$ is to use the fact that the derivative at the origin is $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$, by definition of the adjoint $ad_{\xi}$.
Then, applying the property $Ad_{AB}=Ad_{A}Ad_{B}$,
\[
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
\]

Subsection: Derivative of AdjointTranspose

The transpose of the Adjoint, $Ad_{T}^{T}:\mathfrak{g}^{*}\rightarrow\mathfrak{g}^{*}$, is useful as a way to change the reference frame of vectors in the dual space (note the $^{*}$ denoting that we are now in the dual space).
To be more concrete: whereas $Ad_{T}\hat{\xi}_{b}$ converts the twist $\xi_{b}$ from the $T$ frame, $Ad_{T}^{T}\hat{\xi}_{b}^{*}$ converts the wrench $\xi_{b}^{*}$ from the $T$ frame.
It's difficult to apply a derivation similar to the one in Section \ref{subsec:pose3_adjoint_deriv} for the derivative of $Ad_{T}^{T}\hat{\xi}_{b}^{*}$, because $Ad_{T}^{T}$ cannot be naturally defined as a conjugation, so we resort to crunching through the algebra.
The details are omitted, but the result is a form that vaguely resembles (but does not exactly match) $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$:
\begin{align*}
\begin{bmatrix}\omega_{T}\\ v_{T}\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
\hat{v}_{T} & 0
\end{bmatrix}
\end{align*}
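The closed form above lends itself to a quick numerical sanity check. The following is a minimal sketch (not part of the diff) that compares the analytic Jacobian returned by the new Pose3::Adjoint overload introduced on this branch against GTSAM's finite-difference helper; the exact return types of numericalDerivative21 are an assumption based on the usual GTSAM conventions.

```cpp
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/numericalDerivative.h>
#include <iostream>

using namespace gtsam;

int main() {
  const Pose3 T(Rot3::RzRyRx(0.1, -0.2, 0.3), Point3(1, 2, 3));
  const Vector6 xi_b = (Vector6() << 0.1, 0.2, 0.3, 4, 5, 6).finished();

  // Analytic Jacobians from the derivation: D1 f = -Ad_T * ad(xi_b), D2 f = Ad_T
  Matrix6 H_pose, H_xib;
  T.Adjoint(xi_b, H_pose, H_xib);

  // Finite-difference Jacobian with respect to the pose (first argument)
  auto f = [](const Pose3& P, const Vector6& xi) { return P.Adjoint(xi); };
  Matrix numH_pose = numericalDerivative21<Vector6, Pose3, Vector6>(f, T, xi_b);

  std::cout << "max |H_pose - numerical|: "
            << (H_pose - numH_pose).cwiseAbs().maxCoeff() << "\n";
  return 0;
}
```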
BIN  doc/math.pdf
Binary file not shown.
@@ -11,21 +11,23 @@

/**
 * @file IMUKittiExampleGPS
 * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
 * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
 * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI
 * VISION BENCHMARK SUITE
 * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ
 * Electronics
 */

// GTSAM related includes.
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/GPSFactor.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/ISAM2Params.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>

#include <cstring>
#include <fstream>
@@ -34,35 +36,35 @@
using namespace std;
using namespace gtsam;

using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)
using symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)
using symbol_shorthand::B;  // Bias  (ax,ay,az,gx,gy,gz)
using symbol_shorthand::V;  // Vel   (xdot,ydot,zdot)
using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)

struct KittiCalibration {
double body_ptx;
double body_pty;
double body_ptz;
double body_prx;
double body_pry;
double body_prz;
double accelerometer_sigma;
double gyroscope_sigma;
double integration_sigma;
double accelerometer_bias_sigma;
double gyroscope_bias_sigma;
double average_delta_t;
double body_ptx;
double body_pty;
double body_ptz;
double body_prx;
double body_pry;
double body_prz;
double accelerometer_sigma;
double gyroscope_sigma;
double integration_sigma;
double accelerometer_bias_sigma;
double gyroscope_bias_sigma;
double average_delta_t;
};

struct ImuMeasurement {
double time;
double dt;
Vector3 accelerometer;
Vector3 gyroscope;  // omega
double time;
double dt;
Vector3 accelerometer;
Vector3 gyroscope;  // omega
};

struct GpsMeasurement {
double time;
Vector3 position;  // x,y,z
double time;
Vector3 position;  // x,y,z
};

const string output_filename = "IMUKittiExampleGPSResults.csv";
@@ -70,290 +72,313 @@ const string output_filename = "IMUKittiExampleGPSResults.csv";
void loadKittiData(KittiCalibration& kitti_calibration,
vector<ImuMeasurement>& imu_measurements,
vector<GpsMeasurement>& gps_measurements) {
string line;
string line;

// Read IMU metadata and compute relative sensor pose transforms
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
ifstream imu_metadata(imu_metadata_file.c_str());
// Read IMU metadata and compute relative sensor pose transforms
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
// GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
// AverageDeltaT
string imu_metadata_file =
findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
ifstream imu_metadata(imu_metadata_file.c_str());

printf("-- Reading sensor metadata\n");
printf("-- Reading sensor metadata\n");

getline(imu_metadata, line, '\n');  // ignore the first line
getline(imu_metadata, line, '\n');  // ignore the first line

// Load Kitti calibration
getline(imu_metadata, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
&kitti_calibration.body_ptx,
&kitti_calibration.body_pty,
&kitti_calibration.body_ptz,
&kitti_calibration.body_prx,
&kitti_calibration.body_pry,
&kitti_calibration.body_prz,
&kitti_calibration.accelerometer_sigma,
&kitti_calibration.gyroscope_sigma,
&kitti_calibration.integration_sigma,
&kitti_calibration.accelerometer_bias_sigma,
&kitti_calibration.gyroscope_bias_sigma,
&kitti_calibration.average_delta_t);
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
kitti_calibration.body_ptx,
kitti_calibration.body_pty,
kitti_calibration.body_ptz,
kitti_calibration.body_prx,
kitti_calibration.body_pry,
kitti_calibration.body_prz,
kitti_calibration.accelerometer_sigma,
kitti_calibration.gyroscope_sigma,
kitti_calibration.integration_sigma,
kitti_calibration.accelerometer_bias_sigma,
kitti_calibration.gyroscope_bias_sigma,
kitti_calibration.average_delta_t);
// Load Kitti calibration
getline(imu_metadata, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
&kitti_calibration.body_ptx, &kitti_calibration.body_pty,
&kitti_calibration.body_ptz, &kitti_calibration.body_prx,
&kitti_calibration.body_pry, &kitti_calibration.body_prz,
&kitti_calibration.accelerometer_sigma,
&kitti_calibration.gyroscope_sigma,
&kitti_calibration.integration_sigma,
&kitti_calibration.accelerometer_bias_sigma,
&kitti_calibration.gyroscope_bias_sigma,
&kitti_calibration.average_delta_t);
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
kitti_calibration.body_ptx, kitti_calibration.body_pty,
kitti_calibration.body_ptz, kitti_calibration.body_prx,
kitti_calibration.body_pry, kitti_calibration.body_prz,
kitti_calibration.accelerometer_sigma,
kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma,
kitti_calibration.accelerometer_bias_sigma,
kitti_calibration.gyroscope_bias_sigma,
kitti_calibration.average_delta_t);

// Read IMU data
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
printf("-- Reading IMU measurements from file\n");
{
ifstream imu_data(imu_data_file.c_str());
getline(imu_data, line, '\n');  // ignore the first line
// Read IMU data
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
printf("-- Reading IMU measurements from file\n");
{
ifstream imu_data(imu_data_file.c_str());
getline(imu_data, line, '\n');  // ignore the first line

double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
while (!imu_data.eof()) {
getline(imu_data, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
&time, &dt,
&acc_x, &acc_y, &acc_z,
&gyro_x, &gyro_y, &gyro_z);
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0,
gyro_y = 0, gyro_z = 0;
while (!imu_data.eof()) {
getline(imu_data, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt,
&acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z);

ImuMeasurement measurement;
measurement.time = time;
measurement.dt = dt;
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
imu_measurements.push_back(measurement);
}
ImuMeasurement measurement;
measurement.time = time;
measurement.dt = dt;
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
imu_measurements.push_back(measurement);
}
}

// Read GPS data
// Time,X,Y,Z
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
printf("-- Reading GPS measurements from file\n");
{
ifstream gps_data(gps_data_file.c_str());
getline(gps_data, line, '\n');  // ignore the first line
// Read GPS data
// Time,X,Y,Z
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
printf("-- Reading GPS measurements from file\n");
{
ifstream gps_data(gps_data_file.c_str());
getline(gps_data, line, '\n');  // ignore the first line

double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
while (!gps_data.eof()) {
getline(gps_data, line, '\n');
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
while (!gps_data.eof()) {
getline(gps_data, line, '\n');
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);

GpsMeasurement measurement;
measurement.time = time;
measurement.position = Vector3(gps_x, gps_y, gps_z);
gps_measurements.push_back(measurement);
}
GpsMeasurement measurement;
measurement.time = time;
measurement.position = Vector3(gps_x, gps_y, gps_z);
gps_measurements.push_back(measurement);
}
}
}

int main(int argc, char* argv[]) {
KittiCalibration kitti_calibration;
vector<ImuMeasurement> imu_measurements;
vector<GpsMeasurement> gps_measurements;
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
KittiCalibration kitti_calibration;
vector<ImuMeasurement> imu_measurements;
vector<GpsMeasurement> gps_measurements;
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);

Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
.finished();
auto body_T_imu = Pose3::Expmap(BodyP);
if (!body_T_imu.equals(Pose3(), 1e-5)) {
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
exit(-1);
}
Vector6 BodyP =
(Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty,
kitti_calibration.body_ptz, kitti_calibration.body_prx,
kitti_calibration.body_pry, kitti_calibration.body_prz)
.finished();
auto body_T_imu = Pose3::Expmap(BodyP);
if (!body_T_imu.equals(Pose3(), 1e-5)) {
printf(
"Currently only support IMUinBody is identity, i.e. IMU and body frame "
"are the same");
exit(-1);
}

// Configure different variables
// double t_offset = gps_measurements[0].time;
size_t first_gps_pose = 1;
size_t gps_skip = 10;  // Skip this many GPS measurements each time
double g = 9.8;
auto w_coriolis = Vector3::Zero();  // zero vector
// Configure different variables
// double t_offset = gps_measurements[0].time;
size_t first_gps_pose = 1;
size_t gps_skip = 10;  // Skip this many GPS measurements each time
double g = 9.8;
auto w_coriolis = Vector3::Zero();  // zero vector

// Configure noise models
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
Vector3::Constant(1.0/0.07))
.finished());
// Configure noise models
auto noise_model_gps = noiseModel::Diagonal::Precisions(
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07))
.finished());

// Set initial conditions for the estimated trajectory
// initial pose is the reference frame (navigation frame)
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
// the vehicle is stationary at the beginning at position 0,0,0
Vector3 current_velocity_global = Vector3::Zero();
auto current_bias = imuBias::ConstantBias();  // init with zero bias
// Set initial conditions for the estimated trajectory
// initial pose is the reference frame (navigation frame)
auto current_pose_global =
Pose3(Rot3(), gps_measurements[first_gps_pose].position);
// the vehicle is stationary at the beginning at position 0,0,0
Vector3 current_velocity_global = Vector3::Zero();
auto current_bias = imuBias::ConstantBias();  // init with zero bias

auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
Vector3::Constant(1.0))
.finished());
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
Vector3::Constant(5.00e-05))
.finished());
auto sigma_init_x = noiseModel::Diagonal::Precisions(
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished());
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
auto sigma_init_b = noiseModel::Diagonal::Sigmas(
(Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05))
.finished());

// Set IMU preintegration parameters
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
// error committed in integrating position from velocities
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
// Set IMU preintegration parameters
Matrix33 measured_acc_cov =
I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
Matrix33 measured_omega_cov =
I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
// error committed in integrating position from velocities
Matrix33 integration_error_cov =
I_3x3 * pow(kitti_calibration.integration_sigma, 2);

auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
imu_params->accelerometerCovariance = measured_acc_cov;  // acc white noise in continuous
imu_params->integrationCovariance = integration_error_cov;  // integration uncertainty continuous
imu_params->gyroscopeCovariance = measured_omega_cov;  // gyro white noise in continuous
imu_params->omegaCoriolis = w_coriolis;
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
imu_params->accelerometerCovariance =
measured_acc_cov;  // acc white noise in continuous
imu_params->integrationCovariance =
integration_error_cov;  // integration uncertainty continuous
imu_params->gyroscopeCovariance =
measured_omega_cov;  // gyro white noise in continuous
imu_params->omegaCoriolis = w_coriolis;

std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement =
nullptr;

// Set ISAM2 parameters and create ISAM2 solver object
ISAM2Params isam_params;
isam_params.factorization = ISAM2Params::CHOLESKY;
isam_params.relinearizeSkip = 10;
// Set ISAM2 parameters and create ISAM2 solver object
ISAM2Params isam_params;
isam_params.factorization = ISAM2Params::CHOLESKY;
isam_params.relinearizeSkip = 10;

ISAM2 isam(isam_params);
ISAM2 isam(isam_params);

// Create the factor graph and values object that will store new factors and values to add to the incremental graph
NonlinearFactorGraph new_factors;
Values new_values;  // values storing the initial estimates of new nodes in the factor graph
// Create the factor graph and values object that will store new factors and
// values to add to the incremental graph
NonlinearFactorGraph new_factors;
Values new_values;  // values storing the initial estimates of new nodes in
// the factor graph

/// Main loop:
/// (1) we read the measurements
/// (2) we create the corresponding factors in the graph
/// (3) we solve the graph to obtain and optimal estimate of robot trajectory
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
size_t j = 0;
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
// At each non=IMU measurement we initialize a new node in the graph
auto current_pose_key = X(i);
auto current_vel_key = V(i);
auto current_bias_key = B(i);
double t = gps_measurements[i].time;
/// Main loop:
/// (1) we read the measurements
/// (2) we create the corresponding factors in the graph
/// (3) we solve the graph to obtain and optimal estimate of robot trajectory
printf(
"-- Starting main loop: inference is performed at each time step, but we "
"plot trajectory every 10 steps\n");
size_t j = 0;
size_t included_imu_measurement_count = 0;

if (i == first_gps_pose) {
// Create initial estimate and prior on initial pose, velocity, and biases
new_values.insert(current_pose_key, current_pose_global);
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
} else {
double t_previous = gps_measurements[i-1].time;
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
// At each non=IMU measurement we initialize a new node in the graph
auto current_pose_key = X(i);
auto current_vel_key = V(i);
auto current_bias_key = B(i);
double t = gps_measurements[i].time;

// Summarize IMU data between the previous GPS measurement and now
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
static size_t included_imu_measurement_count = 0;
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
if (imu_measurements[j].time >= t_previous) {
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
imu_measurements[j].gyroscope,
imu_measurements[j].dt);
included_imu_measurement_count++;
}
j++;
}
if (i == first_gps_pose) {
// Create initial estimate and prior on initial pose, velocity, and biases
new_values.insert(current_pose_key, current_pose_global);
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
new_factors.emplace_shared<PriorFactor<Pose3>>(
current_pose_key, current_pose_global, sigma_init_x);
new_factors.emplace_shared<PriorFactor<Vector3>>(
current_vel_key, current_velocity_global, sigma_init_v);
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(
current_bias_key, current_bias, sigma_init_b);
} else {
double t_previous = gps_measurements[i - 1].time;

// Create IMU factor
auto previous_pose_key = X(i-1);
auto previous_vel_key = V(i-1);
auto previous_bias_key = B(i-1);
// Summarize IMU data between the previous GPS measurement and now
current_summarized_measurement =
std::make_shared<PreintegratedImuMeasurements>(imu_params,
current_bias);

new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
current_pose_key, current_vel_key,
previous_bias_key, *current_summarized_measurement);

// Bias evolution as given in the IMU metadata
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
.finished());
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
current_bias_key,
imuBias::ConstantBias(),
sigma_between_b);

// Create GPS factor
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
if ((i % gps_skip) == 0) {
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
new_values.insert(current_pose_key, gps_pose);

printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
cout << gps_pose.translation();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);
}

// Add initial values for velocity and bias based on the previous estimates
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);

// Update solver
// =======================================================================
// We accumulate 2*GPSskip GPS measurements before updating the solver at
// first so that the heading becomes observable.
if (i > (first_gps_pose + 2*gps_skip)) {
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
new_factors.print();

isam.update(new_factors, new_values);

// Reset the newFactors and newValues list
new_factors.resize(0);
new_values.clear();

// Extract the result/current estimates
Values result = isam.calculateEstimate();

current_pose_global = result.at<Pose3>(current_pose_key);
current_velocity_global = result.at<Vector3>(current_vel_key);
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);

printf("\n################ POSE AT TIME %lf ################\n", t);
current_pose_global.print();
printf("\n\n");
}
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
if (imu_measurements[j].time >= t_previous) {
current_summarized_measurement->integrateMeasurement(
imu_measurements[j].accelerometer, imu_measurements[j].gyroscope,
imu_measurements[j].dt);
included_imu_measurement_count++;
}
j++;
}

// Create IMU factor
auto previous_pose_key = X(i - 1);
auto previous_vel_key = V(i - 1);
auto previous_bias_key = B(i - 1);

new_factors.emplace_shared<ImuFactor>(
previous_pose_key, previous_vel_key, current_pose_key,
current_vel_key, previous_bias_key, *current_summarized_measurement);

// Bias evolution as given in the IMU metadata
auto sigma_between_b = noiseModel::Diagonal::Sigmas(
(Vector6() << Vector3::Constant(
sqrt(included_imu_measurement_count) *
kitti_calibration.accelerometer_bias_sigma),
Vector3::Constant(sqrt(included_imu_measurement_count) *
kitti_calibration.gyroscope_bias_sigma))
.finished());
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(
previous_bias_key, current_bias_key, imuBias::ConstantBias(),
sigma_between_b);

// Create GPS factor
auto gps_pose =
Pose3(current_pose_global.rotation(), gps_measurements[i].position);
if ((i % gps_skip) == 0) {
new_factors.emplace_shared<PriorFactor<Pose3>>(
current_pose_key, gps_pose, noise_model_gps);
new_values.insert(current_pose_key, gps_pose);

printf("############ POSE INCLUDED AT TIME %.6lf ############\n",
t);
cout << gps_pose.translation();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);
}

// Add initial values for velocity and bias based on the previous
// estimates
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);

// Update solver
// =======================================================================
// We accumulate 2*GPSskip GPS measurements before updating the solver at
// first so that the heading becomes observable.
if (i > (first_gps_pose + 2 * gps_skip)) {
printf("############ NEW FACTORS AT TIME %.6lf ############\n",
t);
new_factors.print();

isam.update(new_factors, new_values);

// Reset the newFactors and newValues list
new_factors.resize(0);
new_values.clear();

// Extract the result/current estimates
Values result = isam.calculateEstimate();

current_pose_global = result.at<Pose3>(current_pose_key);
current_velocity_global = result.at<Vector3>(current_vel_key);
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);

printf("\n############ POSE AT TIME %lf ############\n", t);
current_pose_global.print();
printf("\n\n");
}
}
}

// Save results to file
printf("\nWriting results to file...\n");
FILE* fp_out = fopen(output_filename.c_str(), "w+");
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
// Save results to file
printf("\nWriting results to file...\n");
FILE* fp_out = fopen(output_filename.c_str(), "w+");
fprintf(fp_out,
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");

Values result = isam.calculateEstimate();
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
auto pose_key = X(i);
auto vel_key = V(i);
auto bias_key = B(i);
Values result = isam.calculateEstimate();
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
auto pose_key = X(i);
auto vel_key = V(i);
auto bias_key = B(i);

auto pose = result.at<Pose3>(pose_key);
auto velocity = result.at<Vector3>(vel_key);
auto bias = result.at<imuBias::ConstantBias>(bias_key);
auto pose = result.at<Pose3>(pose_key);
auto velocity = result.at<Vector3>(vel_key);
auto bias = result.at<imuBias::ConstantBias>(bias_key);

auto pose_quat = pose.rotation().toQuaternion();
auto gps = gps_measurements[i].position;
auto pose_quat = pose.rotation().toQuaternion();
auto gps = gps_measurements[i].position;

cout << "State at #" << i << endl;
cout << "Pose:" << endl << pose << endl;
cout << "Velocity:" << endl << velocity << endl;
cout << "Bias:" << endl << bias << endl;
cout << "State at #" << i << endl;
cout << "Pose:" << endl << pose << endl;
cout << "Velocity:" << endl << velocity << endl;
cout << "Bias:" << endl << bias << endl;

fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
gps_measurements[i].time,
pose.x(), pose.y(), pose.z(),
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
gps(0), gps(1), gps(2));
}
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0),
gps(1), gps(2));
}

fclose(fp_out);
fclose(fp_out);
}
@@ -89,6 +89,13 @@ public:
usurp(dynamic.data());
}

/// Constructor that will resize a dynamic matrix (unless already correct)
OptionalJacobian(Eigen::MatrixXd* dynamic) :
map_(nullptr) {
dynamic->resize(Rows, Cols); // no malloc if correct size
usurp(dynamic->data());
}

#ifndef OPTIONALJACOBIAN_NOBOOST

/// Constructor with boost::none just makes empty
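A minimal usage sketch (not from the diff) of what the new Eigen::MatrixXd* constructor enables: a caller can hand in a dynamic matrix by pointer and OptionalJacobian resizes it to the fixed size before writing. The function f below is hypothetical; only OptionalJacobian and the gtsam typedefs are assumed from the library.

```cpp
#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Vector.h>

using namespace gtsam;

// Hypothetical function with an optional 2x3 Jacobian
Vector2 f(const Vector3& x, OptionalJacobian<2, 3> H = boost::none) {
  if (H) *H << 1, 0, 0, 0, 1, 0;  // write the Jacobian only when requested
  return x.head<2>();
}

void example() {
  Matrix J;                  // dynamic matrix, initially 0x0
  f(Vector3(1, 2, 3), &J);   // resized to 2x3 inside the new constructor
}
```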
@@ -24,40 +24,33 @@ using namespace std;
using namespace gtsam;

//******************************************************************************
#define TEST_CONSTRUCTOR(DIM1, DIM2, X, TRUTHY) \
{ \
OptionalJacobian<DIM1, DIM2> H(X); \
EXPECT(H == TRUTHY); \
}
TEST( OptionalJacobian, Constructors ) {
Matrix23 fixed;

OptionalJacobian<2, 3> H1;
EXPECT(!H1);

OptionalJacobian<2, 3> H2(fixed);
EXPECT(H2);

OptionalJacobian<2, 3> H3(&fixed);
EXPECT(H3);

Matrix dynamic;
OptionalJacobian<2, 3> H4(dynamic);
EXPECT(H4);

OptionalJacobian<2, 3> H5(boost::none);
EXPECT(!H5);

boost::optional<Matrix&> optional(dynamic);
OptionalJacobian<2, 3> H6(optional);
EXPECT(H6);

OptionalJacobian<2, 3> H;
EXPECT(!H);

TEST_CONSTRUCTOR(2, 3, fixed, true);
TEST_CONSTRUCTOR(2, 3, &fixed, true);
TEST_CONSTRUCTOR(2, 3, dynamic, true);
TEST_CONSTRUCTOR(2, 3, &dynamic, true);
TEST_CONSTRUCTOR(2, 3, boost::none, false);
TEST_CONSTRUCTOR(2, 3, optional, true);

// Test dynamic
OptionalJacobian<-1, -1> H7;
EXPECT(!H7);

OptionalJacobian<-1, -1> H8(dynamic);
EXPECT(H8);

OptionalJacobian<-1, -1> H9(boost::none);
EXPECT(!H9);

OptionalJacobian<-1, -1> H10(optional);
EXPECT(H10);
TEST_CONSTRUCTOR(-1, -1, dynamic, true);
TEST_CONSTRUCTOR(-1, -1, boost::none, false);
TEST_CONSTRUCTOR(-1, -1, optional, true);
}

//******************************************************************************
@@ -101,6 +94,25 @@ TEST( OptionalJacobian, Fixed) {
dynamic2.setOnes();
test(dynamic2);
EXPECT(assert_equal(kTestMatrix, dynamic2));

{ // Dynamic pointer
// Passing in an empty matrix means we want it resized
Matrix dynamic0;
test(&dynamic0);
EXPECT(assert_equal(kTestMatrix, dynamic0));

// Dynamic wrong size
Matrix dynamic1(3, 5);
dynamic1.setOnes();
test(&dynamic1);
EXPECT(assert_equal(kTestMatrix, dynamic1));

// Dynamic right size
Matrix dynamic2(2, 5);
dynamic2.setOnes();
test(&dynamic2);
EXPECT(assert_equal(kTestMatrix, dynamic2));
}
}

//******************************************************************************
@@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
// Typedefs
typedef typename FOREST::Node Node;

tbb::task::spawn_root_and_wait(
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
visitorPost, problemSizeThreshold));
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
visitorPost, problemSizeThreshold);
#else
DepthFirstForest(forest, rootData, visitorPre, visitorPost);
#endif
@@ -22,7 +22,7 @@
#include <boost/make_shared.hpp>

#ifdef GTSAM_USE_TBB
#include <tbb/task.h>  // tbb::task, tbb::task_list
#include <tbb/task_group.h>  // tbb::task_group
#include <tbb/scalable_allocator.h>  // tbb::scalable_allocator

namespace gtsam {
@@ -34,7 +34,7 @@ namespace gtsam {

/* ************************************************************************* */
template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
class PreOrderTask : public tbb::task
class PreOrderTask
{
public:
const boost::shared_ptr<NODE>& treeNode;
@@ -42,28 +42,30 @@ namespace gtsam {
VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost;
int problemSizeThreshold;
tbb::task_group& tg;
bool makeNewTasks;

bool isPostOrderPhase;
// Keep track of order phase across multiple calls to the same functor
mutable bool isPostOrderPhase;

PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
bool makeNewTasks = true)
tbb::task_group& tg, bool makeNewTasks = true)
: treeNode(treeNode),
myData(myData),
visitorPre(visitorPre),
visitorPost(visitorPost),
problemSizeThreshold(problemSizeThreshold),
tg(tg),
makeNewTasks(makeNewTasks),
isPostOrderPhase(false) {}

tbb::task* execute() override
void operator()() const
{
if(isPostOrderPhase)
{
// Run the post-order visitor since this task was recycled to run the post-order visitor
(void) visitorPost(treeNode, *myData);
return nullptr;
}
else
{
@@ -71,14 +73,10 @@ namespace gtsam {
{
if(!treeNode->children.empty())
{
// Allocate post-order task as a continuation
isPostOrderPhase = true;
recycle_as_continuation();

bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);

tbb::task* firstChild = 0;
tbb::task_list childTasks;
// If we have child tasks, start subtasks and wait for them to complete
tbb::task_group ctg;
for(const boost::shared_ptr<NODE>& child: treeNode->children)
{
// Process child in a subtask.  Important:  Run visitorPre before calling
@@ -86,37 +84,30 @@ namespace gtsam {
// allocated an extra child, this causes a TBB error.
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
tbb::task* childTask =
new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
problemSizeThreshold, overThreshold);
if (firstChild)
childTasks.push_back(*childTask);
else
firstChild = childTask;
ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
problemSizeThreshold, ctg, overThreshold));
}
ctg.wait();

// If we have child tasks, start subtasks and wait for them to complete
set_ref_count((int)treeNode->children.size());
spawn(childTasks);
return firstChild;
// Allocate post-order task as a continuation
isPostOrderPhase = true;
tg.run(*this);
}
else
{
// Run the post-order visitor in this task if we have no children
(void) visitorPost(treeNode, *myData);
return nullptr;
}
}
else
{
// Process this node and its children in this task
processNodeRecursively(treeNode, *myData);
return nullptr;
}
}
}

void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData)
void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
{
for(const boost::shared_ptr<NODE>& child: node->children)
{
@@ -131,7 +122,7 @@ namespace gtsam {

/* ************************************************************************* */
template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
class RootTask : public tbb::task
class RootTask
{
public:
const ROOTS& roots;
@@ -139,38 +130,31 @@ namespace gtsam {
VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost;
int problemSizeThreshold;
tbb::task_group& tg;
RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
int problemSizeThreshold) :
int problemSizeThreshold, tbb::task_group& tg) :
roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
problemSizeThreshold(problemSizeThreshold) {}
problemSizeThreshold(problemSizeThreshold), tg(tg) {}

tbb::task* execute() override
void operator()() const
{
typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
// Create data and tasks for our children
tbb::task_list tasks;
for(const boost::shared_ptr<NODE>& root: roots)
{
boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
tasks.push_back(*new(allocate_child())
PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
}
// Set TBB ref count
set_ref_count(1 + (int) roots.size());
// Spawn tasks
spawn_and_wait_for_all(tasks);
// Return nullptr
return nullptr;
}
};

template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>&
CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
{
typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold);
}
tbb::task_group tg;
tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
}

}
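For readers unfamiliar with the target API, here is a small standalone sketch (not part of the diff) of the tbb::task_group pattern the rewritten RootTask/PreOrderTask rely on: functors are submitted with run() and the caller blocks on wait() (or run_and_wait()), replacing the deprecated tbb::task spawn/ref-count machinery removed above.

```cpp
#include <tbb/task_group.h>
#include <numeric>
#include <vector>
#include <iostream>

int main() {
  std::vector<int> chunks(8, 0);
  tbb::task_group tg;
  for (size_t i = 0; i < chunks.size(); ++i) {
    // Each submitted functor plays the role of a PreOrderTask::operator()
    tg.run([&chunks, i] { chunks[i] = static_cast<int>(i) * 2; });
  }
  tg.wait();  // join all submitted work, as ctg.wait()/tg.run_and_wait() do above
  std::cout << std::accumulate(chunks.begin(), chunks.end(), 0) << "\n";
  return 0;
}
```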
@@ -80,3 +80,6 @@

// Whether to use the system installed Metis instead of the provided one
#cmakedefine GTSAM_USE_SYSTEM_METIS

// Toggle switch for BetweenFactor jacobian computation
#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
@@ -63,6 +63,47 @@ Matrix6 Pose3::AdjointMap() const {
return adj;
}

/* ************************************************************************* */
// Calculate AdjointMap applied to xi_b, with Jacobians
Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_xib) const {
const Matrix6 Ad = AdjointMap();

// Jacobians
// D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b
// D2 Ad_T(xi_b) = Ad_T
// See docs/math.pdf for more details.
// In D1 calculation, we could be more efficient by writing it out, but do not
// for readability
if (H_pose) *H_pose = -Ad * adjointMap(xi_b);
if (H_xib) *H_xib = Ad;

return Ad * xi_b;
}

/* ************************************************************************* */
/// The dual version of Adjoint
Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_x) const {
const Matrix6 &AdT = AdjointMap().transpose();
const Vector6 &AdTx = AdT * x;

// Jacobians
// See docs/math.pdf for more details.
if (H_pose) {
const auto &w_T_hat = skewSymmetric(AdTx.head<3>()),
&v_T_hat = skewSymmetric(AdTx.tail<3>());
*H_pose = (Matrix6() << w_T_hat, v_T_hat,  //
/*  */ v_T_hat, Z_3x3)
.finished();
}
if (H_x) {
*H_x = AdT;
}

return AdTx;
}

/* ************************************************************************* */
Matrix6 Pose3::adjointMap(const Vector6& xi) {
Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));
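A small usage sketch (an illustration, not from the diff) of the duality that motivates AdjointTranspose: power is frame-invariant, so for any twist xi_b and wrench F, (Ad_T * xi_b) . F equals xi_b . (Ad_T^T * F). This exercises both methods added above.

```cpp
#include <gtsam/geometry/Pose3.h>
#include <iostream>

using namespace gtsam;

int main() {
  const Pose3 T(Rot3::Ypr(0.3, -0.1, 0.2), Point3(1, -2, 0.5));
  const Vector6 xi_b = (Vector6() << 0.1, -0.2, 0.3, 1, 2, 3).finished();
  const Vector6 F = (Vector6() << 0.5, 0.4, -0.3, -1, 0, 2).finished();

  const double lhs = T.Adjoint(xi_b).dot(F);            // twist mapped to spatial frame
  const double rhs = xi_b.dot(T.AdjointTranspose(F));    // wrench mapped the other way
  std::cout << "difference: " << lhs - rhs << "\n";      // ~0 up to round-off
  return 0;
}
```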
@ -145,15 +145,22 @@ public:
* Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame
* Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi)
*/
Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect
Matrix6 AdjointMap() const;

/**
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a
* body-fixed velocity, transforming it to the spatial frame
* \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$
* Note that H_xib = AdjointMap()
*/
Vector6 Adjoint(const Vector6& xi_b) const {
return AdjointMap() * xi_b;
} /// FIXME Not tested - marked as incorrect
Vector6 Adjoint(const Vector6& xi_b,
OptionalJacobian<6, 6> H_this = boost::none,
OptionalJacobian<6, 6> H_xib = boost::none) const;

/// The dual version of Adjoint
Vector6 AdjointTranspose(const Vector6& x,
OptionalJacobian<6, 6> H_this = boost::none,
OptionalJacobian<6, 6> H_x = boost::none) const;

/**
* Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11
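Note: with the new signature the Jacobians are returned through optional output arguments. A hypothetical call site (T and xi_b assumed to be in scope) would look like:

Matrix6 H_T, H_xi;
Vector6 xi_s = T.Adjoint(xi_b, H_T, H_xi);  // xi_s == T.AdjointMap() * xi_b
// H_xi equals T.AdjointMap(), as the doc comment above states.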
@ -261,25 +261,59 @@ Vector3 SO3::Logmap(const SO3& Q, ChartJacobian H) {

// when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc.
// we do something special
if (tr + 1.0 < 1e-10) {
if (std::abs(R33 + 1.0) > 1e-5)
omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Vector3(R13, R23, 1.0 + R33);
else if (std::abs(R22 + 1.0) > 1e-5)
omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Vector3(R12, 1.0 + R22, R32);
else
// if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit
omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Vector3(1.0 + R11, R21, R31);
if (tr + 1.0 < 1e-3) {
if (R33 > R22 && R33 > R11) {
// R33 is the largest diagonal, a=3, b=1, c=2
const double W = R21 - R12;
const double Q1 = 2.0 + 2.0 * R33;
const double Q2 = R31 + R13;
const double Q3 = R23 + R32;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q2, Q3, Q1);
} else if (R22 > R11) {
// R22 is the largest diagonal, a=2, b=3, c=1
const double W = R13 - R31;
const double Q1 = 2.0 + 2.0 * R22;
const double Q2 = R23 + R32;
const double Q3 = R12 + R21;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q3, Q1, Q2);
} else {
// R11 is the largest diagonal, a=1, b=2, c=3
const double W = R32 - R23;
const double Q1 = 2.0 + 2.0 * R11;
const double Q2 = R12 + R21;
const double Q3 = R31 + R13;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q1, Q2, Q3);
}
} else {
double magnitude;
const double tr_3 = tr - 3.0; // always negative
if (tr_3 < -1e-7) {
const double tr_3 = tr - 3.0; // could be non-negative if the matrix is off orthogonal
if (tr_3 < -1e-6) {
// this is the normal case -1 < trace < 3
double theta = acos((tr - 1.0) / 2.0);
magnitude = theta / (2.0 * sin(theta));
} else {
// when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
// use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
// see https://github.com/borglab/gtsam/issues/746 for details
magnitude = 0.5 - tr_3 / 12.0;
magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0;
}
omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12);
}
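Note: a sketch of where the extra quadratic term comes from (my own expansion; see issue #746 for the authoritative discussion). With t = tr(R) and t - 3 = 2(\cos\theta - 1):

\frac{\theta}{2\sin\theta} = \frac{1}{2} + \frac{\theta^2}{12} + \frac{7\theta^4}{720} + O(\theta^6), \qquad \theta^2 = -(t-3) + \frac{(t-3)^2}{12} + O((t-3)^3),

so

\frac{\theta}{2\sin\theta} = \frac{1}{2} - \frac{t-3}{12} + \left(\frac{1}{144} + \frac{7}{720}\right)(t-3)^2 + \dots = \frac{1}{2} - \frac{t-3}{12} + \frac{(t-3)^2}{60},

which matches `magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0`.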
@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full)
EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6));
}

/* ************************************************************************* */
// Check Adjoint numerical derivatives
TEST(Pose3, Adjoint_jacobians)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();

// Check evaluation sanity check
EQUALITY(static_cast<gtsam::Vector>(T.AdjointMap() * xi), T.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T2.AdjointMap() * xi), T2.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T3.AdjointMap() * xi), T3.Adjoint(xi));

// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> Adjoint_proxy =
[&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); };

T.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));

T2.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));

T3.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
}

/* ************************************************************************* */
// Check AdjointTranspose and jacobians
TEST(Pose3, AdjointTranspose)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();

// Check evaluation
EQUALITY(static_cast<Vector>(T.AdjointMap().transpose() * xi),
T.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T2.AdjointMap().transpose() * xi),
T2.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T3.AdjointMap().transpose() * xi),
T3.AdjointTranspose(xi));

// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> AdjointTranspose_proxy =
[&](const Pose3& T, const Vector6& xi) {
return T.AdjointTranspose(xi);
};

T.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));

T2.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));

T3.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
}

/* ************************************************************************* */
// assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi))
TEST(Pose3, Adjoint_hat)
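Note: expectedH1/expectedH2 above come from numericalDerivative21/22, which, as far as I know, build each Jacobian column by a central difference on the tangent space,

H_j \approx \frac{f(x \oplus \delta e_j) - f(x \ominus \delta e_j)}{2\delta},

so agreement with the analytic H_pose/H_xib is an end-to-end check of the new Jacobians.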
@ -122,6 +122,21 @@ TEST( Rot3, AxisAngle)
CHECK(assert_equal(expected,actual3,1e-5));
}

/* ************************************************************************* */
TEST( Rot3, AxisAngle2)
{
// constructor from a rotation matrix, as doubles in *row-major* order.
Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781);

Unit3 actualAxis;
double actualAngle;
// convert Rot3 to quaternion using GTSAM
std::tie(actualAxis, actualAngle) = R1.axisAngle();

double expectedAngle = 3.1396582;
CHECK(assert_equal(expectedAngle, actualAngle, 1e-5));
}

/* ************************************************************************* */
TEST( Rot3, Rodrigues)
{

@ -181,13 +196,13 @@ TEST( Rot3, retract)
}

/* ************************************************************************* */
TEST(Rot3, log) {
TEST( Rot3, log) {
static const double PI = boost::math::constants::pi<double>();
Vector w;
Rot3 R;

#define CHECK_OMEGA(X, Y, Z) \
w = (Vector(3) << X, Y, Z).finished(); \
w = (Vector(3) << (X), (Y), (Z)).finished(); \
R = Rot3::Rodrigues(w); \
EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12));

@ -219,17 +234,17 @@ TEST(Rot3, log) {
CHECK_OMEGA(0, 0, PI)

// Windows and Linux have flipped sign in quaternion mode
#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
//#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
w = (Vector(3) << x * PI, y * PI, z * PI).finished();
R = Rot3::Rodrigues(w);
EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12));
#else
CHECK_OMEGA(x * PI, y * PI, z * PI)
#endif
//#else
// CHECK_OMEGA(x * PI, y * PI, z * PI)
//#endif

// Check 360 degree rotations
#define CHECK_OMEGA_ZERO(X, Y, Z) \
w = (Vector(3) << X, Y, Z).finished(); \
w = (Vector(3) << (X), (Y), (Z)).finished(); \
R = Rot3::Rodrigues(w); \
EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R)));

@ -247,15 +262,15 @@ TEST(Rot3, log) {
// Rot3's Logmap returns different, but equivalent compacted
// axis-angle vectors depending on whether Rot3 is implemented
// by Quaternions or SO3.
#if defined(GTSAM_USE_QUATERNIONS)
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
#if defined(GTSAM_USE_QUATERNIONS)
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
(Vector)Rot3::Logmap(Rlund), 1e-8));
#else
// SO3 will be approximate because of the non-orthogonality
EXPECT(assert_equal(Vector3(0.264452, -0.742197708, -3.04098184),
(Vector)Rot3::Logmap(Rlund), 1e-8));
#else
// SO3 does not bound angle resulting in ~180.1 degrees
EXPECT(assert_equal(Vector3(-0.264544406, 0.742217405, 3.04117314),
(Vector)Rot3::Logmap(Rlund), 1e-8));
#endif
#endif
}

/* ************************************************************************* */
@ -510,4 +510,33 @@ namespace gtsam {
return optimize(function);
}

/* ************************************************************************* */
void GaussianFactorGraph::printErrors(
const VectorValues& values, const std::string& str,
const KeyFormatter& keyFormatter,
const std::function<bool(const Factor* /*factor*/,
double /*whitenedError*/, size_t /*index*/)>&
printCondition) const {
cout << str << "size: " << size() << endl << endl;
for (size_t i = 0; i < (*this).size(); i++) {
const sharedFactor& factor = (*this)[i];
const double errorValue =
(factor != nullptr ? (*this)[i]->error(values) : .0);
if (!printCondition(factor.get(), errorValue, i))
continue; // User-provided filter did not pass

stringstream ss;
ss << "Factor " << i << ": ";
if (factor == nullptr) {
cout << "nullptr"
<< "\n";
} else {
factor->print(ss.str(), keyFormatter);
cout << "error = " << errorValue << "\n";
}
cout << endl; // only one "endl" at end might be faster, \n for each
// factor
}
}

} // namespace gtsam
@ -375,6 +375,14 @@ namespace gtsam {
/** In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;

void printErrors(
const VectorValues& x,
const std::string& str = "GaussianFactorGraph: ",
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
const std::function<bool(const Factor* /*factor*/,
double /*whitenedError*/, size_t /*index*/)>&
printCondition =
[](const Factor*, double, size_t) { return true; }) const;
/// @}

private:
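Note: a hedged usage sketch for the new printCondition hook; gfg and solution stand in for an existing GaussianFactorGraph and VectorValues, and the threshold is arbitrary.

// Print only the factors whose error at `solution` exceeds a threshold.
gfg.printErrors(solution, "High-error factors: ", DefaultKeyFormatter,
                [](const Factor* /*factor*/, double error, size_t /*index*/) {
                  return error > 1e-3;  // filter: keep only the noisy factors
                });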
@ -221,6 +221,7 @@ class VectorValues {
//Constructors
VectorValues();
VectorValues(const gtsam::VectorValues& other);
VectorValues(const gtsam::VectorValues& first, const gtsam::VectorValues& second);

//Named Constructors
static gtsam::VectorValues Zero(const gtsam::VectorValues& model);

@ -400,6 +401,7 @@ class GaussianFactorGraph {
// error and probability
double error(const gtsam::VectorValues& c) const;
double probPrime(const gtsam::VectorValues& c) const;
void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;

gtsam::GaussianFactorGraph clone() const;
gtsam::GaussianFactorGraph negate() const;
@ -88,6 +88,8 @@ virtual class PreintegratedRotationParams {
virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
PreintegrationParams(Vector n_gravity);

gtsam::Vector n_gravity;

static gtsam::PreintegrationParams* MakeSharedD(double g);
static gtsam::PreintegrationParams* MakeSharedU(double g);
static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81
@ -246,6 +246,18 @@ struct apply_compose {
return x.compose(y, H1, H2);
}
};

template <>
struct apply_compose<double> {
double operator()(const double& x, const double& y,
OptionalJacobian<1, 1> H1 = boost::none,
OptionalJacobian<1, 1> H2 = boost::none) const {
if (H1) H1->setConstant(y);
if (H2) H2->setConstant(x);
return x * y;
}
};

}

// Global methods:
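Note: the double specialization simply encodes the scalar product rule,

\frac{\partial (xy)}{\partial x} = y, \qquad \frac{\partial (xy)}{\partial y} = x,

which is why H1 and H2 are filled with the 1x1 constants y and x.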
@ -293,6 +293,19 @@ TEST(Expression, compose3) {
EXPECT(expected == R3.keys());
}

/* ************************************************************************* */
// Test compose with double type (should be multiplication).
TEST(Expression, compose4) {
// Create expression
gtsam::Key key = 1;
Double_ R1(key), R2(key);
Double_ R3 = R1 * R2;

// Check keys
set<Key> expected = list_of(1);
EXPECT(expected == R3.keys());
}

/* ************************************************************************* */
// Test with ternary function.
Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1,
@ -103,7 +103,7 @@ namespace gtsam {
boost::none, boost::optional<Matrix&> H2 = boost::none) const override {
T hx = traits<T>::Between(p1, p2, H1, H2); // h(x)
// manifold equivalent of h(x)-z -> log(z,h(x))
#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR
#ifdef GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
typename traits<T>::ChartJacobian::Jacobian Hlocal;
Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0);
if (H1) *H1 = Hlocal * (*H1);
@ -138,4 +138,21 @@ Point2_ uncalibrate(const Expression<CALIBRATION>& K, const Point2_& xy_hat) {
return Point2_(K, &CALIBRATION::uncalibrate, xy_hat);
}


/// logmap
// TODO(dellaert): Should work but fails because of a type deduction conflict.
// template <typename T>
// gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
// const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
// return gtsam::Expression<typename gtsam::traits<T>::TangentVector>(
// x1, &T::logmap, x2);
// }

template <typename T>
gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
return Expression<typename gtsam::traits<T>::TangentVector>(
gtsam::traits<T>::Logmap, between(x1, x2));
}

} // \namespace gtsam
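Note: the new helper composes Logmap with between, i.e. its intended semantics are

\mathrm{logmap}(x_1, x_2) = \mathrm{Logmap}\big(\mathrm{between}(x_1, x_2)\big) = \log\!\big(x_1^{-1}\, x_2\big),

expressed as an Expression so the chain rule is handled by the expression machinery.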
@ -36,7 +36,7 @@ static const Matrix I = I_1x1;
static const Matrix I3 = I_3x3;

static const noiseModel::Diagonal::shared_ptr priorOrientationNoise =
noiseModel::Diagonal::Sigmas((Vector(1) << 0).finished());
noiseModel::Diagonal::Sigmas(Vector1(0));
static const noiseModel::Diagonal::shared_ptr priorPose2Noise =
noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8));
@ -334,5 +334,11 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {

Vector evaluateError(const T& R1, const T& R2);
};


#include <gtsam/slam/lago.h>
namespace lago {
gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, bool useOdometricPath = true);
gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialGuess);
}

} // namespace gtsam
@ -58,6 +58,13 @@ TEST(SlamExpressions, unrotate) {
const Point3_ q_ = unrotate(R_, p_);
}

/* ************************************************************************* */
TEST(SlamExpressions, logmap) {
Pose3_ T1_(0);
Pose3_ T2_(1);
const Vector6_ l = logmap(T1_, T2_);
}

/* ************************************************************************* */
int main() {
TestResult tr;
@ -100,12 +100,12 @@ endif()

install(
TARGETS gtsam_unstable
EXPORT GTSAM-exports
EXPORT GTSAM_UNSTABLE-exports
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
list(APPEND GTSAM_UNSTABLE_EXPORTED_TARGETS gtsam_unstable)
set(GTSAM_UNSTABLE_EXPORTED_TARGETS "${GTSAM_UNSTABLE_EXPORTED_TARGETS}" PARENT_SCOPE)

# Build examples
add_subdirectory(examples)
@ -65,8 +65,10 @@ set(interface_headers
${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i
)

set(GTSAM_PYTHON_TARGET gtsam_py)
set(GTSAM_PYTHON_UNSTABLE_TARGET gtsam_unstable_py)

pybind_wrap(gtsam_py # target
pybind_wrap(${GTSAM_PYTHON_TARGET} # target
"${interface_headers}" # interface_headers
"gtsam.cpp" # generated_cpp
"gtsam" # module_name

@ -78,7 +80,7 @@ pybind_wrap(gtsam_py # target
ON # use_boost
)

set_target_properties(gtsam_py PROPERTIES
set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
INSTALL_RPATH_USE_LINK_PATH TRUE
OUTPUT_NAME "gtsam"

@ -90,7 +92,7 @@ set_target_properties(gtsam_py PROPERTIES
set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)

# Symlink all tests .py files to build folder.
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
"${GTSAM_MODULE_PATH}")

# Common directory for data/datasets stored with the package.

@ -98,7 +100,7 @@ create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")

# Add gtsam as a dependency to the install target
set(GTSAM_PYTHON_DEPENDENCIES gtsam_py)
set(GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_TARGET})


if(GTSAM_UNSTABLE_BUILD_PYTHON)

@ -122,7 +124,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
gtsam::CameraSetCal3Fisheye
gtsam::KeyPairDoubleMap)

pybind_wrap(gtsam_unstable_py # target
pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target
${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
"gtsam_unstable.cpp" # generated_cpp
"gtsam_unstable" # module_name

@ -134,7 +136,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
ON # use_boost
)

set_target_properties(gtsam_unstable_py PROPERTIES
set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
INSTALL_RPATH_USE_LINK_PATH TRUE
OUTPUT_NAME "gtsam_unstable"

@ -146,11 +148,11 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)

# Symlink all tests .py files to build folder.
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
"${GTSAM_UNSTABLE_MODULE_PATH}")

# Add gtsam_unstable to the install target
list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py)
list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})

endif()
@ -1,6 +1,12 @@
from . import utils
from .gtsam import *
from .utils import findExampleDataFile
"""Module definition file for GTSAM"""

# pylint: disable=import-outside-toplevel, global-variable-not-assigned, possibly-unused-variable, import-error, import-self

import sys

from gtsam import gtsam, utils
from gtsam.gtsam import *
from gtsam.utils import findExampleDataFile


def _init():

@ -13,7 +19,7 @@ def _init():
def Point2(x=np.nan, y=np.nan):
"""Shim for the deleted Point2 type."""
if isinstance(x, np.ndarray):
assert x.shape == (2,), "Point2 takes 2-vector"
assert x.shape == (2, ), "Point2 takes 2-vector"
return x # "copy constructor"
return np.array([x, y], dtype=float)

@ -22,7 +28,7 @@ def _init():
def Point3(x=np.nan, y=np.nan, z=np.nan):
"""Shim for the deleted Point3 type."""
if isinstance(x, np.ndarray):
assert x.shape == (3,), "Point3 takes 3-vector"
assert x.shape == (3, ), "Point3 takes 3-vector"
return x # "copy constructor"
return np.array([x, y, z], dtype=float)
@ -9,15 +9,17 @@ CustomFactor demo that simulates a 1-D sensor fusion task.
|
|||
Author: Fan Jiang, Frank Dellaert
|
||||
"""
|
||||
|
||||
from functools import partial
|
||||
from typing import List, Optional
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
from typing import List, Optional
|
||||
from functools import partial
|
||||
I = np.eye(1)
|
||||
|
||||
|
||||
def simulate_car():
|
||||
# Simulate a car for one second
|
||||
def simulate_car() -> List[float]:
|
||||
"""Simulate a car for one second"""
|
||||
x0 = 0
|
||||
dt = 0.25 # 4 Hz, typical GPS
|
||||
v = 144 * 1000 / 3600 # 144 km/hour = 90mph, pretty fast
|
||||
|
@ -26,46 +28,9 @@ def simulate_car():
|
|||
return x
|
||||
|
||||
|
||||
x = simulate_car()
|
||||
print(f"Simulated car trajectory: {x}")
|
||||
|
||||
# %%
|
||||
add_noise = True # set this to False to run with "perfect" measurements
|
||||
|
||||
# GPS measurements
|
||||
sigma_gps = 3.0 # assume GPS is +/- 3m
|
||||
g = [x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0)
|
||||
for k in range(5)]
|
||||
|
||||
# Odometry measurements
|
||||
sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz
|
||||
o = [x[k + 1] - x[k] + (np.random.normal(scale=sigma_odo) if add_noise else 0)
|
||||
for k in range(4)]
|
||||
|
||||
# Landmark measurements:
|
||||
sigma_lm = 1 # assume landmark measurement is accurate up to 1m
|
||||
|
||||
# Assume first landmark is at x=5, we measure it at time k=0
|
||||
lm_0 = 5.0
|
||||
z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
|
||||
|
||||
# Assume other landmark is at x=28, we measure it at time k=3
|
||||
lm_3 = 28.0
|
||||
z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
|
||||
|
||||
unknown = [gtsam.symbol('x', k) for k in range(5)]
|
||||
|
||||
print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown)))
|
||||
|
||||
# We now can use nonlinear factor graphs
|
||||
factor_graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add factors for GPS measurements
|
||||
I = np.eye(1)
|
||||
gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps)
|
||||
|
||||
|
||||
def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
|
||||
def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
|
||||
values: gtsam.Values,
|
||||
jacobians: Optional[List[np.ndarray]]) -> float:
|
||||
"""GPS Factor error function
|
||||
:param measurement: GPS measurement, to be filled with `partial`
|
||||
:param this: gtsam.CustomFactor handle
|
||||
|
@ -82,36 +47,9 @@ def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobia
|
|||
return error
|
||||
|
||||
|
||||
# Add the GPS factors
|
||||
for k in range(5):
|
||||
gf = gtsam.CustomFactor(gps_model, [unknown[k]], partial(error_gps, np.array([g[k]])))
|
||||
factor_graph.add(gf)
|
||||
|
||||
# New Values container
|
||||
v = gtsam.Values()
|
||||
|
||||
# Add initial estimates to the Values container
|
||||
for i in range(5):
|
||||
v.insert(unknown[i], np.array([0.0]))
|
||||
|
||||
# Initialize optimizer
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
|
||||
# Optimize the factor graph
|
||||
result = optimizer.optimize()
|
||||
|
||||
# calculate the error from ground truth
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)])
|
||||
|
||||
print("Result with only GPS")
|
||||
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
|
||||
# Adding odometry will improve things a lot
|
||||
odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo)
|
||||
|
||||
|
||||
def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
|
||||
def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,
|
||||
values: gtsam.Values,
|
||||
jacobians: Optional[List[np.ndarray]]) -> float:
|
||||
"""Odometry Factor error function
|
||||
:param measurement: Odometry measurement, to be filled with `partial`
|
||||
:param this: gtsam.CustomFactor handle
|
||||
|
@ -130,25 +68,9 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobi
|
|||
return error
|
||||
|
||||
|
||||
for k in range(4):
|
||||
odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]], partial(error_odom, np.array([o[k]])))
|
||||
factor_graph.add(odof)
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
|
||||
result = optimizer.optimize()
|
||||
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)])
|
||||
|
||||
print("Result with GPS+Odometry")
|
||||
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
|
||||
# This is great, but GPS noise is still apparent, so now we add the two landmarks
|
||||
lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm)
|
||||
|
||||
|
||||
def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
|
||||
def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor,
|
||||
values: gtsam.Values,
|
||||
jacobians: Optional[List[np.ndarray]]) -> float:
|
||||
"""Landmark Factor error function
|
||||
:param measurement: Landmark measurement, to be filled with `partial`
|
||||
:param this: gtsam.CustomFactor handle
|
||||
|
@ -165,15 +87,120 @@ def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobian
|
|||
return error
|
||||
|
||||
|
||||
factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[0]], partial(error_lm, np.array([lm_0 + z_0]))))
|
||||
factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[3]], partial(error_lm, np.array([lm_3 + z_3]))))
|
||||
def main():
|
||||
"""Main runner."""
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
x = simulate_car()
|
||||
print(f"Simulated car trajectory: {x}")
|
||||
|
||||
result = optimizer.optimize()
|
||||
add_noise = True # set this to False to run with "perfect" measurements
|
||||
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)])
|
||||
# GPS measurements
|
||||
sigma_gps = 3.0 # assume GPS is +/- 3m
|
||||
g = [
|
||||
x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0)
|
||||
for k in range(5)
|
||||
]
|
||||
|
||||
print("Result with GPS+Odometry+Landmark")
|
||||
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
# Odometry measurements
|
||||
sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz
|
||||
o = [
|
||||
x[k + 1] - x[k] +
|
||||
(np.random.normal(scale=sigma_odo) if add_noise else 0)
|
||||
for k in range(4)
|
||||
]
|
||||
|
||||
# Landmark measurements:
|
||||
sigma_lm = 1 # assume landmark measurement is accurate up to 1m
|
||||
|
||||
# Assume first landmark is at x=5, we measure it at time k=0
|
||||
lm_0 = 5.0
|
||||
z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
|
||||
|
||||
# Assume other landmark is at x=28, we measure it at time k=3
|
||||
lm_3 = 28.0
|
||||
z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
|
||||
|
||||
unknown = [gtsam.symbol('x', k) for k in range(5)]
|
||||
|
||||
print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown)))
|
||||
|
||||
# We now can use nonlinear factor graphs
|
||||
factor_graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add factors for GPS measurements
|
||||
gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps)
|
||||
|
||||
# Add the GPS factors
|
||||
for k in range(5):
|
||||
gf = gtsam.CustomFactor(gps_model, [unknown[k]],
|
||||
partial(error_gps, np.array([g[k]])))
|
||||
factor_graph.add(gf)
|
||||
|
||||
# New Values container
|
||||
v = gtsam.Values()
|
||||
|
||||
# Add initial estimates to the Values container
|
||||
for i in range(5):
|
||||
v.insert(unknown[i], np.array([0.0]))
|
||||
|
||||
# Initialize optimizer
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
|
||||
# Optimize the factor graph
|
||||
result = optimizer.optimize()
|
||||
|
||||
# calculate the error from ground truth
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
|
||||
for k in range(5)])
|
||||
|
||||
print("Result with only GPS")
|
||||
print(result, np.round(error, 2),
|
||||
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
|
||||
# Adding odometry will improve things a lot
|
||||
odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo)
|
||||
|
||||
for k in range(4):
|
||||
odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]],
|
||||
partial(error_odom, np.array([o[k]])))
|
||||
factor_graph.add(odof)
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
|
||||
result = optimizer.optimize()
|
||||
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
|
||||
for k in range(5)])
|
||||
|
||||
print("Result with GPS+Odometry")
|
||||
print(result, np.round(error, 2),
|
||||
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
|
||||
# This is great, but GPS noise is still apparent, so now we add the two landmarks
|
||||
lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm)
|
||||
|
||||
factor_graph.add(
|
||||
gtsam.CustomFactor(lm_model, [unknown[0]],
|
||||
partial(error_lm, np.array([lm_0 + z_0]))))
|
||||
factor_graph.add(
|
||||
gtsam.CustomFactor(lm_model, [unknown[3]],
|
||||
partial(error_lm, np.array([lm_3 + z_3]))))
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
|
||||
|
||||
result = optimizer.optimize()
|
||||
|
||||
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
|
||||
for k in range(5)])
|
||||
|
||||
print("Result with GPS+Odometry+Landmark")
|
||||
print(result, np.round(error, 2),
|
||||
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -13,13 +13,8 @@ Author: Mandy Xie
|
|||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
# ENU Origin is where the plane was in hold next to runway
|
||||
lat0 = 33.86998
|
||||
lon0 = -84.30626
|
||||
|
@ -29,28 +24,34 @@ h0 = 274
|
|||
GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
|
||||
PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25)
|
||||
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add a prior on the first point, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose3() # prior at origin
|
||||
graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
|
||||
def main():
|
||||
"""Main runner."""
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add GPS factors
|
||||
gps = gtsam.Point3(lat0, lon0, h0)
|
||||
graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
# Add a prior on the first point, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose3() # prior at origin
|
||||
graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
|
||||
|
||||
# Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose3())
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
# Add GPS factors
|
||||
gps = gtsam.Point3(lat0, lon0, h0)
|
||||
graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
# Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose3())
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -0,0 +1,366 @@
|
|||
"""
|
||||
Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||
|
||||
Author: Varun Agrawal
|
||||
"""
|
||||
import argparse
|
||||
from typing import List, Tuple
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam import ISAM2, Pose3, noiseModel
|
||||
from gtsam.symbol_shorthand import B, V, X
|
||||
|
||||
GRAVITY = 9.8
|
||||
|
||||
|
||||
class KittiCalibration:
|
||||
"""Class to hold KITTI calibration info."""
|
||||
def __init__(self, body_ptx: float, body_pty: float, body_ptz: float,
|
||||
body_prx: float, body_pry: float, body_prz: float,
|
||||
accelerometer_sigma: float, gyroscope_sigma: float,
|
||||
integration_sigma: float, accelerometer_bias_sigma: float,
|
||||
gyroscope_bias_sigma: float, average_delta_t: float):
|
||||
self.bodyTimu = Pose3(gtsam.Rot3.RzRyRx(body_prx, body_pry, body_prz),
|
||||
gtsam.Point3(body_ptx, body_pty, body_ptz))
|
||||
self.accelerometer_sigma = accelerometer_sigma
|
||||
self.gyroscope_sigma = gyroscope_sigma
|
||||
self.integration_sigma = integration_sigma
|
||||
self.accelerometer_bias_sigma = accelerometer_bias_sigma
|
||||
self.gyroscope_bias_sigma = gyroscope_bias_sigma
|
||||
self.average_delta_t = average_delta_t
|
||||
|
||||
|
||||
class ImuMeasurement:
|
||||
"""An instance of an IMU measurement."""
|
||||
def __init__(self, time: float, dt: float, accelerometer: gtsam.Point3,
|
||||
gyroscope: gtsam.Point3):
|
||||
self.time = time
|
||||
self.dt = dt
|
||||
self.accelerometer = accelerometer
|
||||
self.gyroscope = gyroscope
|
||||
|
||||
|
||||
class GpsMeasurement:
|
||||
"""An instance of a GPS measurement."""
|
||||
def __init__(self, time: float, position: gtsam.Point3):
|
||||
self.time = time
|
||||
self.position = position
|
||||
|
||||
|
||||
def loadImuData(imu_data_file: str) -> List[ImuMeasurement]:
|
||||
"""Helper to load the IMU data."""
|
||||
# Read IMU data
|
||||
# Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||
imu_data_file = gtsam.findExampleDataFile(imu_data_file)
|
||||
imu_measurements = []
|
||||
|
||||
print("-- Reading IMU measurements from file")
|
||||
with open(imu_data_file, encoding='UTF-8') as imu_data:
|
||||
data = imu_data.readlines()
|
||||
for i in range(1, len(data)): # ignore the first line
|
||||
time, dt, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z = map(
|
||||
float, data[i].split(' '))
|
||||
imu_measurement = ImuMeasurement(
|
||||
time, dt, np.asarray([acc_x, acc_y, acc_z]),
|
||||
np.asarray([gyro_x, gyro_y, gyro_z]))
|
||||
imu_measurements.append(imu_measurement)
|
||||
|
||||
return imu_measurements
|
||||
|
||||
|
||||
def loadGpsData(gps_data_file: str) -> List[GpsMeasurement]:
|
||||
"""Helper to load the GPS data."""
|
||||
# Read GPS data
|
||||
# Time,X,Y,Z
|
||||
gps_data_file = gtsam.findExampleDataFile(gps_data_file)
|
||||
gps_measurements = []
|
||||
|
||||
print("-- Reading GPS measurements from file")
|
||||
with open(gps_data_file, encoding='UTF-8') as gps_data:
|
||||
data = gps_data.readlines()
|
||||
for i in range(1, len(data)):
|
||||
time, x, y, z = map(float, data[i].split(','))
|
||||
gps_measurement = GpsMeasurement(time, np.asarray([x, y, z]))
|
||||
gps_measurements.append(gps_measurement)
|
||||
|
||||
return gps_measurements
|
||||
|
||||
|
||||
def loadKittiData(
|
||||
imu_data_file: str = "KittiEquivBiasedImu.txt",
|
||||
gps_data_file: str = "KittiGps_converted.txt",
|
||||
imu_metadata_file: str = "KittiEquivBiasedImu_metadata.txt"
|
||||
) -> Tuple[KittiCalibration, List[ImuMeasurement], List[GpsMeasurement]]:
|
||||
"""
|
||||
Load the KITTI Dataset.
|
||||
"""
|
||||
# Read IMU metadata and compute relative sensor pose transforms
|
||||
# BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
|
||||
# GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
|
||||
# AverageDeltaT
|
||||
imu_metadata_file = gtsam.findExampleDataFile(imu_metadata_file)
|
||||
with open(imu_metadata_file, encoding='UTF-8') as imu_metadata:
|
||||
print("-- Reading sensor metadata")
|
||||
line = imu_metadata.readline() # Ignore the first line
|
||||
line = imu_metadata.readline().strip()
|
||||
data = list(map(float, line.split(' ')))
|
||||
kitti_calibration = KittiCalibration(*data)
|
||||
print("IMU metadata:", data)
|
||||
|
||||
imu_measurements = loadImuData(imu_data_file)
|
||||
gps_measurements = loadGpsData(gps_data_file)
|
||||
|
||||
return kitti_calibration, imu_measurements, gps_measurements
|
||||
|
||||
|
||||
def getImuParams(kitti_calibration: KittiCalibration):
|
||||
"""Get the IMU parameters from the KITTI calibration data."""
|
||||
w_coriolis = np.zeros(3)
|
||||
|
||||
# Set IMU preintegration parameters
|
||||
measured_acc_cov = np.eye(3) * np.power(
|
||||
kitti_calibration.accelerometer_sigma, 2)
|
||||
measured_omega_cov = np.eye(3) * np.power(
|
||||
kitti_calibration.gyroscope_sigma, 2)
|
||||
# error committed in integrating position from velocities
|
||||
integration_error_cov = np.eye(3) * np.power(
|
||||
kitti_calibration.integration_sigma, 2)
|
||||
|
||||
imu_params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
|
||||
# acc white noise in continuous
|
||||
imu_params.setAccelerometerCovariance(measured_acc_cov)
|
||||
# integration uncertainty continuous
|
||||
imu_params.setIntegrationCovariance(integration_error_cov)
|
||||
# gyro white noise in continuous
|
||||
imu_params.setGyroscopeCovariance(measured_omega_cov)
|
||||
imu_params.setOmegaCoriolis(w_coriolis)
|
||||
|
||||
return imu_params
|
||||
|
||||
|
||||
def save_results(isam: gtsam.ISAM2, output_filename: str, first_gps_pose: int,
|
||||
gps_measurements: List[GpsMeasurement]):
|
||||
"""Write the results from `isam` to `output_filename`."""
|
||||
# Save results to file
|
||||
print("Writing results to file...")
|
||||
with open(output_filename, 'w', encoding='UTF-8') as fp_out:
|
||||
fp_out.write(
|
||||
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n")
|
||||
|
||||
result = isam.calculateEstimate()
|
||||
for i in range(first_gps_pose, len(gps_measurements)):
|
||||
pose_key = X(i)
|
||||
vel_key = V(i)
|
||||
bias_key = B(i)
|
||||
|
||||
pose = result.atPose3(pose_key)
|
||||
velocity = result.atVector(vel_key)
|
||||
bias = result.atConstantBias(bias_key)
|
||||
|
||||
pose_quat = pose.rotation().toQuaternion()
|
||||
gps = gps_measurements[i].position
|
||||
|
||||
print(f"State at #{i}")
|
||||
print(f"Pose:\n{pose}")
|
||||
print(f"Velocity:\n{velocity}")
|
||||
print(f"Bias:\n{bias}")
|
||||
|
||||
fp_out.write("{},{},{},{},{},{},{},{},{},{},{}\n".format(
|
||||
gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||
gps[0], gps[1], gps[2]))
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
"""Parse command line arguments."""
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--output_filename",
|
||||
default="IMUKittiExampleGPSResults.csv")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def optimize(gps_measurements: List[GpsMeasurement],
|
||||
imu_measurements: List[ImuMeasurement],
|
||||
sigma_init_x: gtsam.noiseModel.Diagonal,
|
||||
sigma_init_v: gtsam.noiseModel.Diagonal,
|
||||
sigma_init_b: gtsam.noiseModel.Diagonal,
|
||||
noise_model_gps: gtsam.noiseModel.Diagonal,
|
||||
kitti_calibration: KittiCalibration, first_gps_pose: int,
|
||||
gps_skip: int) -> gtsam.ISAM2:
|
||||
"""Run ISAM2 optimization on the measurements."""
|
||||
# Set initial conditions for the estimated trajectory
|
||||
# initial pose is the reference frame (navigation frame)
|
||||
current_pose_global = Pose3(gtsam.Rot3(),
|
||||
gps_measurements[first_gps_pose].position)
|
||||
|
||||
# the vehicle is stationary at the beginning at position 0,0,0
|
||||
current_velocity_global = np.zeros(3)
|
||||
current_bias = gtsam.imuBias.ConstantBias() # init with zero bias
|
||||
|
||||
imu_params = getImuParams(kitti_calibration)
|
||||
|
||||
# Set ISAM2 parameters and create ISAM2 solver object
|
||||
isam_params = gtsam.ISAM2Params()
|
||||
isam_params.setFactorization("CHOLESKY")
|
||||
isam_params.setRelinearizeSkip(10)
|
||||
|
||||
isam = gtsam.ISAM2(isam_params)
|
||||
|
||||
# Create the factor graph and values object that will store new factors and
|
||||
# values to add to the incremental graph
|
||||
new_factors = gtsam.NonlinearFactorGraph()
|
||||
# values storing the initial estimates of new nodes in the factor graph
|
||||
new_values = gtsam.Values()
|
||||
|
||||
# Main loop:
|
||||
# (1) we read the measurements
|
||||
# (2) we create the corresponding factors in the graph
|
||||
# (3) we solve the graph to obtain an optimal estimate of robot trajectory
|
||||
print("-- Starting main loop: inference is performed at each time step, "
|
||||
"but we plot trajectory every 10 steps")
|
||||
|
||||
j = 0
|
||||
included_imu_measurement_count = 0
|
||||
|
||||
for i in range(first_gps_pose, len(gps_measurements)):
|
||||
# At each non-IMU measurement we initialize a new node in the graph
|
||||
current_pose_key = X(i)
|
||||
current_vel_key = V(i)
|
||||
current_bias_key = B(i)
|
||||
t = gps_measurements[i].time
|
||||
|
||||
if i == first_gps_pose:
|
||||
# Create initial estimate and prior on initial pose, velocity, and biases
|
||||
new_values.insert(current_pose_key, current_pose_global)
|
||||
new_values.insert(current_vel_key, current_velocity_global)
|
||||
new_values.insert(current_bias_key, current_bias)
|
||||
|
||||
new_factors.addPriorPose3(current_pose_key, current_pose_global,
|
||||
sigma_init_x)
|
||||
new_factors.addPriorVector(current_vel_key,
|
||||
current_velocity_global, sigma_init_v)
|
||||
new_factors.addPriorConstantBias(current_bias_key, current_bias,
|
||||
sigma_init_b)
|
||||
else:
|
||||
t_previous = gps_measurements[i - 1].time
|
||||
|
||||
# Summarize IMU data between the previous GPS measurement and now
|
||||
current_summarized_measurement = gtsam.PreintegratedImuMeasurements(
|
||||
imu_params, current_bias)
|
||||
|
||||
while (j < len(imu_measurements)
|
||||
and imu_measurements[j].time <= t):
|
||||
if imu_measurements[j].time >= t_previous:
|
||||
current_summarized_measurement.integrateMeasurement(
|
||||
imu_measurements[j].accelerometer,
|
||||
imu_measurements[j].gyroscope, imu_measurements[j].dt)
|
||||
included_imu_measurement_count += 1
|
||||
j += 1
|
||||
|
||||
# Create IMU factor
|
||||
previous_pose_key = X(i - 1)
|
||||
previous_vel_key = V(i - 1)
|
||||
previous_bias_key = B(i - 1)
|
||||
|
||||
new_factors.push_back(
|
||||
gtsam.ImuFactor(previous_pose_key, previous_vel_key,
|
||||
current_pose_key, current_vel_key,
|
||||
previous_bias_key,
|
||||
current_summarized_measurement))
|
||||
|
||||
# Bias evolution as given in the IMU metadata
|
||||
sigma_between_b = gtsam.noiseModel.Diagonal.Sigmas(
|
||||
np.asarray([
|
||||
np.sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.accelerometer_bias_sigma
|
||||
] * 3 + [
|
||||
np.sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.gyroscope_bias_sigma
|
||||
] * 3))
|
||||
|
||||
new_factors.push_back(
|
||||
gtsam.BetweenFactorConstantBias(previous_bias_key,
|
||||
current_bias_key,
|
||||
gtsam.imuBias.ConstantBias(),
|
||||
sigma_between_b))
|
||||
|
||||
# Create GPS factor
|
||||
gps_pose = Pose3(current_pose_global.rotation(),
|
||||
gps_measurements[i].position)
|
||||
if (i % gps_skip) == 0:
|
||||
new_factors.addPriorPose3(current_pose_key, gps_pose,
|
||||
noise_model_gps)
|
||||
new_values.insert(current_pose_key, gps_pose)
|
||||
|
||||
print(f"############ POSE INCLUDED AT TIME {t} ############")
|
||||
print(gps_pose.translation(), "\n")
|
||||
else:
|
||||
new_values.insert(current_pose_key, current_pose_global)
|
||||
|
||||
# Add initial values for velocity and bias based on the previous
|
||||
# estimates
|
||||
new_values.insert(current_vel_key, current_velocity_global)
|
||||
new_values.insert(current_bias_key, current_bias)
|
||||
|
||||
# Update solver
|
||||
# =======================================================================
|
||||
# We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||
# first so that the heading becomes observable.
|
||||
if i > (first_gps_pose + 2 * gps_skip):
|
||||
print(f"############ NEW FACTORS AT TIME {t:.6f} ############")
|
||||
new_factors.print()
|
||||
|
||||
isam.update(new_factors, new_values)
|
||||
|
||||
# Reset the newFactors and newValues list
|
||||
new_factors.resize(0)
|
||||
new_values.clear()
|
||||
|
||||
# Extract the result/current estimates
|
||||
result = isam.calculateEstimate()
|
||||
|
||||
current_pose_global = result.atPose3(current_pose_key)
|
||||
current_velocity_global = result.atVector(current_vel_key)
|
||||
current_bias = result.atConstantBias(current_bias_key)
|
||||
|
||||
print(f"############ POSE AT TIME {t} ############")
|
||||
current_pose_global.print()
|
||||
print("\n")
|
||||
|
||||
return isam
|
||||
|
||||
|
||||
def main():
|
||||
"""Main runner."""
|
||||
args = parse_args()
|
||||
kitti_calibration, imu_measurements, gps_measurements = loadKittiData()
|
||||
|
||||
if not kitti_calibration.bodyTimu.equals(Pose3(), 1e-8):
|
||||
raise ValueError(
|
||||
"Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"
|
||||
)
|
||||
|
||||
# Configure different variables
|
||||
first_gps_pose = 1
|
||||
gps_skip = 10
|
||||
|
||||
# Configure noise models
|
||||
noise_model_gps = noiseModel.Diagonal.Precisions(
|
||||
np.asarray([0, 0, 0] + [1.0 / 0.07] * 3))
|
||||
|
||||
sigma_init_x = noiseModel.Diagonal.Precisions(
|
||||
np.asarray([0, 0, 0, 1, 1, 1]))
|
||||
sigma_init_v = noiseModel.Diagonal.Sigmas(np.ones(3) * 1000.0)
|
||||
sigma_init_b = noiseModel.Diagonal.Sigmas(
|
||||
np.asarray([0.1] * 3 + [5.00e-05] * 3))
|
||||
|
||||
isam = optimize(gps_measurements, imu_measurements, sigma_init_x,
|
||||
sigma_init_v, sigma_init_b, noise_model_gps,
|
||||
kitti_calibration, first_gps_pose, gps_skip)
|
||||
|
||||
save_results(isam, args.output_filename, first_gps_pose, gps_measurements)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -10,31 +10,51 @@ A script validating and demonstrating the ImuFactor inference.
|
|||
Author: Frank Dellaert, Varun Agrawal
|
||||
"""
|
||||
|
||||
# pylint: disable=no-name-in-module,unused-import,arguments-differ
|
||||
# pylint: disable=no-name-in-module,unused-import,arguments-differ,import-error,wrong-import-order
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import math
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam.symbol_shorthand import B, V, X
|
||||
from gtsam.utils.plot import plot_pose3
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
from PreintegrationExample import POSES_FIG, PreintegrationExample
|
||||
|
||||
BIAS_KEY = B(0)
|
||||
GRAVITY = 9.81
|
||||
|
||||
np.set_printoptions(precision=3, suppress=True)
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
"""Parse command line arguments."""
|
||||
parser = argparse.ArgumentParser("ImuFactorExample.py")
|
||||
parser.add_argument("--twist_scenario",
|
||||
default="sick_twist",
|
||||
choices=("zero_twist", "forward_twist", "loop_twist",
|
||||
"sick_twist"))
|
||||
parser.add_argument("--time",
|
||||
"-T",
|
||||
default=12,
|
||||
type=int,
|
||||
help="Total navigation time in seconds")
|
||||
parser.add_argument("--compute_covariances",
|
||||
default=False,
|
||||
action='store_true')
|
||||
parser.add_argument("--verbose", default=False, action='store_true')
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
class ImuFactorExample(PreintegrationExample):
|
||||
"""Class to run example of the Imu Factor."""
|
||||
def __init__(self, twist_scenario="sick_twist"):
|
||||
def __init__(self, twist_scenario: str = "sick_twist"):
|
||||
self.velocity = np.array([2, 0, 0])
|
||||
self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
|
||||
self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
|
||||
|
@ -51,19 +71,30 @@ class ImuFactorExample(PreintegrationExample):
|
|||
gyroBias = np.array([0.1, 0.3, -0.1])
|
||||
bias = gtsam.imuBias.ConstantBias(accBias, gyroBias)
|
||||
|
||||
params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
|
||||
|
||||
# Some arbitrary noise sigmas
|
||||
gyro_sigma = 1e-3
|
||||
accel_sigma = 1e-3
|
||||
I_3x3 = np.eye(3)
|
||||
params.setGyroscopeCovariance(gyro_sigma**2 * I_3x3)
|
||||
params.setAccelerometerCovariance(accel_sigma**2 * I_3x3)
|
||||
params.setIntegrationCovariance(1e-7**2 * I_3x3)
|
||||
|
||||
dt = 1e-2
|
||||
super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario],
|
||||
bias, dt)
|
||||
bias, params, dt)
|
||||
|
||||
def addPrior(self, i, graph):
|
||||
"""Add priors at time step `i`."""
|
||||
def addPrior(self, i: int, graph: gtsam.NonlinearFactorGraph):
|
||||
"""Add a prior on the navigation state at time `i`."""
|
||||
state = self.scenario.navState(i)
|
||||
graph.push_back(
|
||||
gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
|
||||
graph.push_back(
|
||||
gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise))
|
||||
|
||||
def optimize(self, graph, initial):
|
||||
def optimize(self, graph: gtsam.NonlinearFactorGraph,
|
||||
initial: gtsam.Values):
|
||||
"""Optimize using Levenberg-Marquardt optimization."""
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
params.setVerbosityLM("SUMMARY")
|
||||
|
@ -71,24 +102,49 @@ class ImuFactorExample(PreintegrationExample):
|
|||
result = optimizer.optimize()
|
||||
return result
|
||||
|
||||
def plot(self, result):
|
||||
"""Plot resulting poses."""
|
||||
def plot(self,
|
||||
values: gtsam.Values,
|
||||
title: str = "Estimated Trajectory",
|
||||
fignum: int = POSES_FIG + 1,
|
||||
show: bool = False):
|
||||
"""
|
||||
Plot poses in values.
|
||||
|
||||
Args:
|
||||
values: The values object with the poses to plot.
|
||||
title: The title of the plot.
|
||||
fignum: The matplotlib figure number.
|
||||
POSES_FIG is a value from the PreintegrationExample which we simply increment to generate a new figure.
|
||||
show: Flag indicating whether to display the figure.
|
||||
"""
|
||||
i = 0
|
||||
while result.exists(X(i)):
|
||||
pose_i = result.atPose3(X(i))
|
||||
plot_pose3(POSES_FIG + 1, pose_i, 1)
|
||||
while values.exists(X(i)):
|
||||
pose_i = values.atPose3(X(i))
|
||||
plot_pose3(fignum, pose_i, 1)
|
||||
i += 1
|
||||
plt.title("Estimated Trajectory")
|
||||
plt.title(title)
|
||||
|
||||
gtsam.utils.plot.set_axes_equal(POSES_FIG + 1)
|
||||
gtsam.utils.plot.set_axes_equal(fignum)
|
||||
|
||||
print("Bias Values", result.atConstantBias(BIAS_KEY))
|
||||
print("Bias Values", values.atConstantBias(BIAS_KEY))
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
def run(self, T=12, compute_covariances=False, verbose=True):
|
||||
"""Main runner."""
|
||||
if show:
|
||||
plt.show()
|
||||
|
||||
def run(self,
|
||||
T: int = 12,
|
||||
compute_covariances: bool = False,
|
||||
verbose: bool = True):
|
||||
"""
|
||||
Main runner.
|
||||
|
||||
Args:
|
||||
T: Total trajectory time.
|
||||
compute_covariances: Flag indicating whether to compute marginal covariances.
|
||||
verbose: Flag indicating if printing should be verbose.
|
||||
"""
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# initialize data structure for pre-integrated IMU measurements
|
||||
|
@ -173,25 +229,11 @@ class ImuFactorExample(PreintegrationExample):
|
|||
print("Covariance on vel {}:\n{}\n".format(
|
||||
i, marginals.marginalCovariance(V(i))))
|
||||
|
||||
self.plot(result)
|
||||
self.plot(result, show=True)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser("ImuFactorExample.py")
|
||||
parser.add_argument("--twist_scenario",
|
||||
default="sick_twist",
|
||||
choices=("zero_twist", "forward_twist", "loop_twist",
|
||||
"sick_twist"))
|
||||
parser.add_argument("--time",
|
||||
"-T",
|
||||
default=12,
|
||||
type=int,
|
||||
help="Total time in seconds")
|
||||
parser.add_argument("--compute_covariances",
|
||||
default=False,
|
||||
action='store_true')
|
||||
parser.add_argument("--verbose", default=False, action='store_true')
|
||||
args = parser.parse_args()
|
||||
args = parse_args()
|
||||
|
||||
ImuFactorExample(args.twist_scenario).run(args.time,
|
||||
args.compute_covariances,
|
||||
|
|
|
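The change above threads an explicit gtsam.PreintegrationParams object through the ImuFactorExample constructor instead of relying on defaults. Below is a minimal standalone sketch of building such a params object plus a constant bias; the accelerometer bias values and noise sigmas are illustrative assumptions mirroring the example.

import numpy as np
import gtsam

GRAVITY = 10  # same convention as the example

# Preintegration parameters with isotropic, illustrative noise sigmas.
params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
I_3x3 = np.eye(3)
params.setGyroscopeCovariance(1e-3**2 * I_3x3)
params.setAccelerometerCovariance(1e-3**2 * I_3x3)
params.setIntegrationCovariance(1e-7**2 * I_3x3)

# A constant IMU bias (accelerometer, gyroscope); values chosen arbitrarily.
accBias = np.array([-0.3, 0.1, 0.2])
gyroBias = np.array([0.1, 0.3, -0.1])
bias = gtsam.imuBias.ConstantBias(accBias, gyroBias)

These two objects are what the updated super().__init__(..., bias, params, dt) call above expects.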
@ -13,57 +13,60 @@ Author: Frank Dellaert
|
|||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
# Create noise models
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))
|
||||
def main():
|
||||
"""Main runner"""
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add odometry factors
|
||||
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
|
||||
# For simplicity, we will use the same noise model for each odometry factor
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
# Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))
|
||||
|
||||
# Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
# Add odometry factors
|
||||
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
|
||||
# For simplicity, we will use the same noise model for each odometry factor
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
# Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 4):
|
||||
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
|
||||
|
||||
fig = plt.figure(0)
|
||||
for i in range(1, 4):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 4):
|
||||
print("X{} covariance:\n{}\n".format(i,
|
||||
marginals.marginalCovariance(i)))
|
||||
|
||||
for i in range(1, 4):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
|
||||
marginals.marginalCovariance(i))
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
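For reference, here is a compressed sketch of the prior-plus-odometry pattern used in the refactored example above, reduced to two poses so the optimized result and its marginals can be inspected directly; the numeric values are the example's, trimmed.

import numpy as np
import gtsam

PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))

# Two-pose chain: prior on pose 1, one odometry (Between) factor to pose 2.
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))

# Deliberately off initial guesses, as in the example.
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))

result = gtsam.LevenbergMarquardtOptimizer(
    graph, initial, gtsam.LevenbergMarquardtParams()).optimize()
marginals = gtsam.Marginals(graph, result)
for i in (1, 2):
    print(result.atPose2(i), marginals.marginalCovariance(i))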
@ -13,69 +13,85 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
|
|||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.symbol_shorthand import X, L
|
||||
import numpy as np
|
||||
from gtsam.symbol_shorthand import L, X
|
||||
|
||||
# Create noise models
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2]))
|
||||
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Create the keys corresponding to unknown variables in the factor graph
|
||||
X1 = X(1)
|
||||
X2 = X(2)
|
||||
X3 = X(3)
|
||||
L1 = L(4)
|
||||
L2 = L(5)
|
||||
def main():
|
||||
"""Main runner"""
|
||||
|
||||
# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
|
||||
graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add odometry factors between X1,X2 and X2,X3, respectively
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))
|
||||
# Create the keys corresponding to unknown variables in the factor graph
|
||||
X1 = X(1)
|
||||
X2 = X(2)
|
||||
X3 = X(3)
|
||||
L1 = L(4)
|
||||
L2 = L(5)
|
||||
|
||||
# Add Range-Bearing measurements to two different landmarks L1 and L2
|
||||
graph.add(gtsam.BearingRangeFactor2D(
|
||||
X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE))
|
||||
graph.add(gtsam.BearingRangeFactor2D(
|
||||
X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
|
||||
graph.add(gtsam.BearingRangeFactor2D(
|
||||
X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
|
||||
# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
|
||||
graph.add(
|
||||
gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))
|
||||
|
||||
# Print graph
|
||||
print("Factor Graph:\n{}".format(graph))
|
||||
# Add odometry factors between X1,X2 and X2,X3, respectively
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
|
||||
ODOMETRY_NOISE))
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
|
||||
ODOMETRY_NOISE))
|
||||
|
||||
# Create (deliberately inaccurate) initial estimate
|
||||
initial_estimate = gtsam.Values()
|
||||
initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
|
||||
initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
|
||||
initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
|
||||
initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
|
||||
initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))
|
||||
# Add Range-Bearing measurements to two different landmarks L1 and L2
|
||||
graph.add(
|
||||
gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
|
||||
np.sqrt(4.0 + 4.0), MEASUREMENT_NOISE))
|
||||
graph.add(
|
||||
gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
|
||||
MEASUREMENT_NOISE))
|
||||
graph.add(
|
||||
gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
|
||||
MEASUREMENT_NOISE))
|
||||
|
||||
# Print
|
||||
print("Initial Estimate:\n{}".format(initial_estimate))
|
||||
# Print graph
|
||||
print("Factor Graph:\n{}".format(graph))
|
||||
|
||||
# Optimize using Levenberg-Marquardt optimization. The optimizer
|
||||
# accepts an optional set of configuration parameters, controlling
|
||||
# things like convergence criteria, the type of linear system solver
|
||||
# to use, and the amount of information displayed during optimization.
|
||||
# Here we will use the default set of parameters. See the
|
||||
# documentation for the full set of parameters.
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
# Create (deliberately inaccurate) initial estimate
|
||||
initial_estimate = gtsam.Values()
|
||||
initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
|
||||
initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
|
||||
initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
|
||||
initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
|
||||
initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))
|
||||
|
||||
# Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for (key, str) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]:
|
||||
print("{} covariance:\n{}\n".format(str, marginals.marginalCovariance(key)))
|
||||
# Print
|
||||
print("Initial Estimate:\n{}".format(initial_estimate))
|
||||
|
||||
# Optimize using Levenberg-Marquardt optimization. The optimizer
|
||||
# accepts an optional set of configuration parameters, controlling
|
||||
# things like convergence criteria, the type of linear system solver
|
||||
# to use, and the amount of information displayed during optimization.
|
||||
# Here we will use the default set of parameters. See the
|
||||
# documentation for the full set of parameters.
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
|
||||
params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
# Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for (key, s) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"),
|
||||
(L2, "L2")]:
|
||||
print("{} covariance:\n{}\n".format(s,
|
||||
marginals.marginalCovariance(key)))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
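A small sketch of the BearingRangeFactor2D signature used above, with symbol_shorthand keys; the single measurement is the example's first one and is otherwise illustrative.

import numpy as np
import gtsam
from gtsam.symbol_shorthand import L, X

MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2]))

# One bearing-range measurement from pose X(1) to landmark L(1):
# bearing of 45 degrees and range sqrt(8) meters.
graph = gtsam.NonlinearFactorGraph()
graph.add(
    gtsam.BearingRangeFactor2D(X(1), L(1), gtsam.Rot2.fromDegrees(45),
                               np.sqrt(8.0), MEASUREMENT_NOISE))
print(graph)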
@ -0,0 +1,178 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose SLAM example using iSAM2 in the 2D plane.
|
||||
Author: Jerred Chen, Yusuf Ali
|
||||
Modeled after:
|
||||
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
|
||||
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
|
||||
key: int):
|
||||
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
|
||||
|
||||
# Print the current estimates computed using iSAM2.
|
||||
print("*"*50 + f"\nInference after State {key+1}:\n")
|
||||
print(current_estimate)
|
||||
|
||||
# Compute the marginals for all states in the graph.
|
||||
marginals = gtsam.Marginals(graph, current_estimate)
|
||||
|
||||
# Plot the newly updated iSAM2 inference.
|
||||
fig = plt.figure(0)
|
||||
axes = fig.gca()
|
||||
plt.cla()
|
||||
|
||||
i = 1
|
||||
while current_estimate.exists(i):
|
||||
gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
i += 1
|
||||
|
||||
plt.axis('equal')
|
||||
axes.set_xlim(-1, 5)
|
||||
axes.set_ylim(-1, 3)
|
||||
plt.pause(1)
|
||||
|
||||
def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
|
||||
key: int, xy_tol=0.6, theta_tol=17) -> int:
|
||||
"""Simple brute force approach which iterates through previous states
|
||||
and checks for loop closure.
|
||||
|
||||
Args:
|
||||
odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
|
||||
current_estimate: The current estimates computed by iSAM2.
|
||||
key: Key corresponding to the current state estimate of the robot.
|
||||
xy_tol: Optional argument for the x-y measurement tolerance, in meters.
|
||||
theta_tol: Optional argument for the theta measurement tolerance, in degrees.
|
||||
Returns:
|
||||
k: The key of the state which is helping add the loop closure constraint.
|
||||
If loop closure is not found, then None is returned.
|
||||
"""
|
||||
if current_estimate:
|
||||
prev_est = current_estimate.atPose2(key+1)
|
||||
rotated_odom = prev_est.rotation().matrix() @ odom[:2]
|
||||
curr_xy = np.array([prev_est.x() + rotated_odom[0],
|
||||
prev_est.y() + rotated_odom[1]])
|
||||
curr_theta = prev_est.theta() + odom[2]
|
||||
for k in range(1, key+1):
|
||||
pose_xy = np.array([current_estimate.atPose2(k).x(),
|
||||
current_estimate.atPose2(k).y()])
|
||||
pose_theta = current_estimate.atPose2(k).theta()
|
||||
if (abs(pose_xy - curr_xy) <= xy_tol).all() and \
|
||||
(abs(pose_theta - curr_theta) <= theta_tol*np.pi/180):
|
||||
return k
|
||||
|
||||
def Pose2SLAM_ISAM2_example():
|
||||
"""Perform 2D SLAM given the ground truth changes in pose as well as
|
||||
simple loop closure detection."""
|
||||
plt.ion()
|
||||
|
||||
# Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
|
||||
prior_xy_sigma = 0.3
|
||||
|
||||
# Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
|
||||
prior_theta_sigma = 5
|
||||
|
||||
# Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
|
||||
odometry_xy_sigma = 0.2
|
||||
|
||||
# Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
|
||||
odometry_theta_sigma = 5
|
||||
|
||||
# Although this example only uses linear measurements and Gaussian noise models, it is important
|
||||
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
|
||||
# simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
|
||||
prior_xy_sigma,
|
||||
prior_theta_sigma*np.pi/180]))
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
|
||||
odometry_xy_sigma,
|
||||
odometry_theta_sigma*np.pi/180]))
|
||||
|
||||
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
initial_estimate = gtsam.Values()
|
||||
|
||||
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
|
||||
# update calls are required to perform the relinearization.
|
||||
parameters = gtsam.ISAM2Params()
|
||||
parameters.setRelinearizeThreshold(0.1)
|
||||
parameters.setRelinearizeSkip(1)
|
||||
isam = gtsam.ISAM2(parameters)
|
||||
|
||||
# Create the ground truth odometry measurements of the robot during the trajectory.
|
||||
true_odometry = [(2, 0, 0),
|
||||
(2, 0, math.pi/2),
|
||||
(2, 0, math.pi/2),
|
||||
(2, 0, math.pi/2),
|
||||
(2, 0, math.pi/2)]
|
||||
|
||||
# Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
|
||||
odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
|
||||
for true_odom in true_odometry]
|
||||
|
||||
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
|
||||
# iSAM2 incremental optimization.
|
||||
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
|
||||
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
|
||||
# Initialize the current estimate which is used during the incremental inference loop.
|
||||
current_estimate = initial_estimate
|
||||
|
||||
for i in range(len(true_odometry)):
|
||||
|
||||
# Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
|
||||
noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]
|
||||
|
||||
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
|
||||
loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)
|
||||
|
||||
# Add a binary factor in between two existing states if loop closure is detected.
|
||||
# Otherwise, add a binary factor between a newly observed state and the previous state.
|
||||
if loop:
|
||||
graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
|
||||
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
|
||||
else:
|
||||
graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
|
||||
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
|
||||
|
||||
# Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
|
||||
computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
|
||||
noisy_odom_y,
|
||||
noisy_odom_theta))
|
||||
initial_estimate.insert(i + 2, computed_estimate)
|
||||
|
||||
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
|
||||
isam.update(graph, initial_estimate)
|
||||
current_estimate = isam.calculateEstimate()
|
||||
|
||||
# Report all current state estimates from the iSAM2 optimization.
|
||||
report_on_progress(graph, current_estimate, i)
|
||||
initial_estimate.clear()
|
||||
|
||||
# Print the final covariance matrix for each pose after completing inference on the trajectory.
|
||||
marginals = gtsam.Marginals(graph, current_estimate)
|
||||
i = 1
|
||||
for i in range(1, len(true_odometry)+1):
|
||||
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
Pose2SLAM_ISAM2_example()
|
|
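The core of the new example is the iSAM2 update cycle. A minimal sketch of that cycle, reduced to a single prior-only step, looks roughly as follows; the sigma values are the example's.

import numpy as np
import gtsam

# iSAM2 configuration: relinearization threshold and skip, as above.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)

PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(
    np.array([0.3, 0.3, 5 * np.pi / 180]))

# New factors and new variable values for this step only.
graph = gtsam.NonlinearFactorGraph()
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
initial_estimate = gtsam.Values()
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))

# One incremental update of the Bayes tree, then read back the estimate.
isam.update(graph, initial_estimate)
current_estimate = isam.calculateEstimate()
print(current_estimate)

Subsequent steps would pass only the newly added factors and values to update(), as the loop in the example does.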
@ -15,82 +15,88 @@ from __future__ import print_function
|
|||
|
||||
import math
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
"""Create 3d double numpy array."""
|
||||
return np.array([x, y, z], dtype=float)
|
||||
def main():
|
||||
"""Main runner."""
|
||||
# Create noise models
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(gtsam.Point3(0.3, 0.3, 0.1))
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(
|
||||
gtsam.Point3(0.2, 0.2, 0.1))
|
||||
|
||||
# Create noise models
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))
|
||||
# 1. Create a factor graph container and add factors to it
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# 1. Create a factor graph container and add factors to it
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
# 2a. Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix)
|
||||
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
|
||||
|
||||
# 2a. Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix)
|
||||
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
|
||||
# 2b. Add odometry factors
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2),
|
||||
ODOMETRY_NOISE))
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2),
|
||||
ODOMETRY_NOISE))
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2),
|
||||
ODOMETRY_NOISE))
|
||||
|
||||
# 2b. Add odometry factors
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
2, 3, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
3, 4, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
4, 5, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
|
||||
# 2c. Add the loop closure constraint
|
||||
# This factor encodes the fact that we have returned to the same pose. In real
|
||||
# systems, these constraints may be identified in many ways, such as appearance-based
|
||||
# techniques with camera images. We will use another Between Factor to enforce this constraint:
|
||||
graph.add(
|
||||
gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2),
|
||||
ODOMETRY_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph)) # print
|
||||
|
||||
# 2c. Add the loop closure constraint
|
||||
# This factor encodes the fact that we have returned to the same pose. In real
|
||||
# systems, these constraints may be identified in many ways, such as appearance-based
|
||||
# techniques with camera images. We will use another Between Factor to enforce this constraint:
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
5, 2, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph)) # print
|
||||
# 3. Create the data structure to hold the initial_estimate estimate to the
|
||||
# solution. For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial_estimate = gtsam.Values()
|
||||
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
|
||||
initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
|
||||
initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
|
||||
print("\nInitial Estimate:\n{}".format(initial_estimate)) # print
|
||||
|
||||
# 3. Create the data structure to hold the initial_estimate estimate to the
|
||||
# solution. For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial_estimate = gtsam.Values()
|
||||
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
|
||||
initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
|
||||
initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
|
||||
print("\nInitial Estimate:\n{}".format(initial_estimate)) # print
|
||||
# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
|
||||
# The optimizer accepts an optional set of configuration parameters,
|
||||
# controlling things like convergence criteria, the type of linear
|
||||
# system solver to use, and the amount of information displayed during
|
||||
# optimization. We will set a few parameters as a demonstration.
|
||||
parameters = gtsam.GaussNewtonParams()
|
||||
|
||||
# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
|
||||
# The optimizer accepts an optional set of configuration parameters,
|
||||
# controlling things like convergence criteria, the type of linear
|
||||
# system solver to use, and the amount of information displayed during
|
||||
# optimization. We will set a few parameters as a demonstration.
|
||||
parameters = gtsam.GaussNewtonParams()
|
||||
# Stop iterating once the change in error between steps is less than this value
|
||||
parameters.setRelativeErrorTol(1e-5)
|
||||
# Do not perform more than N iteration steps
|
||||
parameters.setMaxIterations(100)
|
||||
# Create the optimizer ...
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
|
||||
# ... and optimize
|
||||
result = optimizer.optimize()
|
||||
print("Final Result:\n{}".format(result))
|
||||
|
||||
# Stop iterating once the change in error between steps is less than this value
|
||||
parameters.setRelativeErrorTol(1e-5)
|
||||
# Do not perform more than N iteration steps
|
||||
parameters.setMaxIterations(100)
|
||||
# Create the optimizer ...
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
|
||||
# ... and optimize
|
||||
result = optimizer.optimize()
|
||||
print("Final Result:\n{}".format(result))
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 6):
|
||||
print("X{} covariance:\n{}\n".format(i,
|
||||
marginals.marginalCovariance(i)))
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 6):
|
||||
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
|
||||
for i in range(1, 6):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
|
||||
marginals.marginalCovariance(i))
|
||||
|
||||
fig = plt.figure(0)
|
||||
for i in range(1, 6):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
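The stopping criteria set on gtsam.GaussNewtonParams above are the interesting knobs here. A trimmed sketch of the same configuration on a two-pose problem, with illustrative values:

import math
import numpy as np
import gtsam

NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), NOISE))
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, math.pi / 2), NOISE))

initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))

parameters = gtsam.GaussNewtonParams()
parameters.setRelativeErrorTol(1e-5)  # stop once the error change is tiny
parameters.setMaxIterations(100)      # hard iteration cap
result = gtsam.GaussNewtonOptimizer(graph, initial, parameters).optimize()
print(result)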
@ -12,77 +12,86 @@ and does the optimization. Output is written on a file, in g2o format
|
|||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import math
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
from gtsam.utils import plot
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
"""Create 3d double numpy array."""
|
||||
return np.array([x, y, z], dtype=float)
|
||||
def main():
|
||||
"""Main runner."""
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A 2D Pose SLAM example that reads input from g2o, "
|
||||
"converts it to a factor graph and does the optimization. "
|
||||
"Output is written on a file, in g2o format")
|
||||
parser.add_argument('-i', '--input', help='input file g2o format')
|
||||
parser.add_argument(
|
||||
'-o',
|
||||
'--output',
|
||||
help="the path to the output file with optimized graph")
|
||||
parser.add_argument('-m',
|
||||
'--maxiter',
|
||||
type=int,
|
||||
help="maximum number of iterations for optimizer")
|
||||
parser.add_argument('-k',
|
||||
'--kernel',
|
||||
choices=['none', 'huber', 'tukey'],
|
||||
default="none",
|
||||
help="Type of kernel used")
|
||||
parser.add_argument("-p",
|
||||
"--plot",
|
||||
action="store_true",
|
||||
help="Flag to plot results")
|
||||
args = parser.parse_args()
|
||||
|
||||
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
|
||||
else args.input
|
||||
|
||||
maxIterations = 100 if args.maxiter is None else args.maxiter
|
||||
|
||||
is3D = False
|
||||
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
|
||||
assert args.kernel == "none", "Supplied kernel type is not yet implemented"
|
||||
|
||||
# Add prior on the pose having index (key) = 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(gtsam.Point3(1e-6, 1e-6, 1e-8))
|
||||
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
params.setVerbosity("Termination")
|
||||
params.setMaxIterations(maxIterations)
|
||||
# parameters.setRelativeErrorTol(1e-5)
|
||||
# Create the optimizer ...
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
|
||||
# ... and optimize
|
||||
result = optimizer.optimize()
|
||||
|
||||
print("Optimization complete")
|
||||
print("initial error = ", graph.error(initial))
|
||||
print("final error = ", graph.error(result))
|
||||
|
||||
if args.output is None:
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
print("Final Result:\n{}".format(result))
|
||||
else:
|
||||
outputFile = args.output
|
||||
print("Writing results to file: ", outputFile)
|
||||
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
|
||||
gtsam.writeG2o(graphNoKernel, result, outputFile)
|
||||
print("Done!")
|
||||
|
||||
if args.plot:
|
||||
resultPoses = gtsam.utilities.extractPose2(result)
|
||||
for i in range(resultPoses.shape[0]):
|
||||
plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
|
||||
plt.show()
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A 2D Pose SLAM example that reads input from g2o, "
|
||||
"converts it to a factor graph and does the optimization. "
|
||||
"Output is written on a file, in g2o format")
|
||||
parser.add_argument('-i', '--input', help='input file g2o format')
|
||||
parser.add_argument('-o', '--output',
|
||||
help="the path to the output file with optimized graph")
|
||||
parser.add_argument('-m', '--maxiter', type=int,
|
||||
help="maximum number of iterations for optimizer")
|
||||
parser.add_argument('-k', '--kernel', choices=['none', 'huber', 'tukey'],
|
||||
default="none", help="Type of kernel used")
|
||||
parser.add_argument("-p", "--plot", action="store_true",
|
||||
help="Flag to plot results")
|
||||
args = parser.parse_args()
|
||||
|
||||
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
|
||||
else args.input
|
||||
|
||||
maxIterations = 100 if args.maxiter is None else args.maxiter
|
||||
|
||||
is3D = False
|
||||
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
|
||||
assert args.kernel == "none", "Supplied kernel type is not yet implemented"
|
||||
|
||||
# Add prior on the pose having index (key) = 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8))
|
||||
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
params.setVerbosity("Termination")
|
||||
params.setMaxIterations(maxIterations)
|
||||
# parameters.setRelativeErrorTol(1e-5)
|
||||
# Create the optimizer ...
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
|
||||
# ... and optimize
|
||||
result = optimizer.optimize()
|
||||
|
||||
print("Optimization complete")
|
||||
print("initial error = ", graph.error(initial))
|
||||
print("final error = ", graph.error(result))
|
||||
|
||||
|
||||
if args.output is None:
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
print("Final Result:\n{}".format(result))
|
||||
else:
|
||||
outputFile = args.output
|
||||
print("Writing results to file: ", outputFile)
|
||||
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
|
||||
gtsam.writeG2o(graphNoKernel, result, outputFile)
|
||||
print ("Done!")
|
||||
|
||||
if args.plot:
|
||||
resultPoses = gtsam.utilities.extractPose2(result)
|
||||
for i in range(resultPoses.shape[0]):
|
||||
plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
|
||||
plt.show()
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
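A condensed sketch of the read-optimize-write round trip performed by main() above; "output.g2o" is an illustrative path, not one used by the example.

import gtsam

g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt")
is3D = False
graph, initial = gtsam.readG2o(g2oFile, is3D)

# Gauge freedom: pin the pose with key 0 using a very tight prior.
priorModel = gtsam.noiseModel.Diagonal.Variances(gtsam.Point3(1e-6, 1e-6, 1e-8))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))

params = gtsam.GaussNewtonParams()
params.setMaxIterations(100)
result = gtsam.GaussNewtonOptimizer(graph, initial, params).optimize()

# Write the optimized values against the original (prior-free) graph.
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
gtsam.writeG2o(graphNoKernel, result, "output.g2o")  # illustrative output path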
@ -0,0 +1,67 @@
|
|||
"""
|
||||
GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
See LICENSE for the license information
|
||||
|
||||
A 2D Pose SLAM example that reads input from g2o, and solve the Pose2 problem
|
||||
using LAGO (Linear Approximation for Graph Optimization).
|
||||
Output is written to a file, in g2o format
|
||||
|
||||
Reference:
|
||||
L. Carlone, R. Aragues, J. Castellanos, and B. Bona, A fast and accurate
|
||||
approximation for planar pose graph optimization, IJRR, 2014.
|
||||
|
||||
L. Carlone, R. Aragues, J.A. Castellanos, and B. Bona, A linear approximation
|
||||
for graph-based simultaneous localization and mapping, RSS, 2011.
|
||||
|
||||
Author: Luca Carlone (C++), John Lambert (Python)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from argparse import Namespace
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose2, PriorFactorPose2, Values
|
||||
|
||||
|
||||
def run(args: Namespace) -> None:
|
||||
"""Run LAGO on input data stored in g2o file."""
|
||||
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None else args.input
|
||||
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
graph, initial = gtsam.readG2o(g2oFile)
|
||||
|
||||
# Add prior on the pose having index (key) = 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8))
|
||||
graph.add(PriorFactorPose2(0, Pose2(), priorModel))
|
||||
print(graph)
|
||||
|
||||
print("Computing LAGO estimate")
|
||||
estimateLago: Values = gtsam.lago.initialize(graph)
|
||||
print("done!")
|
||||
|
||||
if args.output is None:
|
||||
estimateLago.print("estimateLago")
|
||||
else:
|
||||
outputFile = args.output
|
||||
print("Writing results to file: ", outputFile)
|
||||
graphNoKernel = gtsam.NonlinearFactorGraph()
|
||||
graphNoKernel, initial2 = gtsam.readG2o(g2oFile)
|
||||
gtsam.writeG2o(graphNoKernel, estimateLago, outputFile)
|
||||
print("Done! ")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A 2D Pose SLAM example that reads input from g2o, "
|
||||
"converts it to a factor graph and does the optimization. "
|
||||
"Output is written on a file, in g2o format"
|
||||
)
|
||||
parser.add_argument("-i", "--input", help="input file g2o format")
|
||||
parser.add_argument("-o", "--output", help="the path to the output file with optimized graph")
|
||||
args = parser.parse_args()
|
||||
run(args)
|
|
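A minimal sketch of the LAGO call introduced above, assuming the same example dataset; the resulting Values could then seed a nonlinear optimizer for refinement.

import gtsam
from gtsam import Point3, Pose2, PriorFactorPose2, Values

g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt")
graph, initial = gtsam.readG2o(g2oFile)

# Pin key 0 before initializing, exactly as run() does above.
priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8))
graph.add(PriorFactorPose2(0, Pose2(), priorModel))

estimate: Values = gtsam.lago.initialize(graph)
estimate.print("LAGO estimate")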
@ -0,0 +1,208 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose SLAM example using iSAM2 in 3D space.
|
||||
Author: Jerred Chen
|
||||
Modeled after:
|
||||
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
|
||||
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
|
||||
"""
|
||||
|
||||
from typing import List
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
|
||||
key: int):
|
||||
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
|
||||
|
||||
# Print the current estimates computed using iSAM2.
|
||||
print("*"*50 + f"\nInference after State {key+1}:\n")
|
||||
print(current_estimate)
|
||||
|
||||
# Compute the marginals for all states in the graph.
|
||||
marginals = gtsam.Marginals(graph, current_estimate)
|
||||
|
||||
# Plot the newly updated iSAM2 inference.
|
||||
fig = plt.figure(0)
|
||||
axes = fig.gca(projection='3d')
|
||||
plt.cla()
|
||||
|
||||
i = 1
|
||||
while current_estimate.exists(i):
|
||||
gtsam_plot.plot_pose3(0, current_estimate.atPose3(i), 10,
|
||||
marginals.marginalCovariance(i))
|
||||
i += 1
|
||||
|
||||
axes.set_xlim3d(-30, 45)
|
||||
axes.set_ylim3d(-30, 45)
|
||||
axes.set_zlim3d(-30, 45)
|
||||
plt.pause(1)
|
||||
|
||||
def create_poses() -> List[gtsam.Pose3]:
|
||||
"""Creates ground truth poses of the robot."""
|
||||
P0 = np.array([[1, 0, 0, 0],
|
||||
[0, 1, 0, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 1]])
|
||||
P1 = np.array([[0, -1, 0, 15],
|
||||
[1, 0, 0, 15],
|
||||
[0, 0, 1, 20],
|
||||
[0, 0, 0, 1]])
|
||||
P2 = np.array([[np.cos(np.pi/4), 0, np.sin(np.pi/4), 30],
|
||||
[0, 1, 0, 30],
|
||||
[-np.sin(np.pi/4), 0, np.cos(np.pi/4), 30],
|
||||
[0, 0, 0, 1]])
|
||||
P3 = np.array([[0, 1, 0, 30],
|
||||
[0, 0, -1, 0],
|
||||
[-1, 0, 0, -15],
|
||||
[0, 0, 0, 1]])
|
||||
P4 = np.array([[-1, 0, 0, 0],
|
||||
[0, -1, 0, -10],
|
||||
[0, 0, 1, -10],
|
||||
[0, 0, 0, 1]])
|
||||
P5 = P0[:]
|
||||
|
||||
return [gtsam.Pose3(P0), gtsam.Pose3(P1), gtsam.Pose3(P2),
|
||||
gtsam.Pose3(P3), gtsam.Pose3(P4), gtsam.Pose3(P5)]
|
||||
|
||||
def determine_loop_closure(odom_tf: gtsam.Pose3, current_estimate: gtsam.Values,
|
||||
key: int, xyz_tol=0.6, rot_tol=17) -> int:
|
||||
"""Simple brute force approach which iterates through previous states
|
||||
and checks for loop closure.
|
||||
|
||||
Args:
|
||||
odom_tf: The noisy odometry transformation measurement in the body frame.
|
||||
current_estimate: The current estimates computed by iSAM2.
|
||||
key: Key corresponding to the current state estimate of the robot.
|
||||
xyz_tol: Optional argument for the translational tolerance, in meters.
|
||||
rot_tol: Optional argument for the rotational tolerance, in degrees.
|
||||
Returns:
|
||||
k: The key of the state which is helping add the loop closure constraint.
|
||||
If loop closure is not found, then None is returned.
|
||||
"""
|
||||
if current_estimate:
|
||||
prev_est = current_estimate.atPose3(key+1)
|
||||
curr_est = prev_est.compose(odom_tf)
|
||||
for k in range(1, key+1):
|
||||
pose = current_estimate.atPose3(k)
|
||||
if (abs(pose.matrix()[:3,:3] - curr_est.matrix()[:3,:3]) <= rot_tol*np.pi/180).all() and \
|
||||
(abs(pose.matrix()[:3,3] - curr_est.matrix()[:3,3]) <= xyz_tol).all():
|
||||
return k
|
||||
|
||||
def Pose3_ISAM2_example():
|
||||
"""Perform 3D SLAM given ground truth poses as well as simple
|
||||
loop closure detection."""
|
||||
plt.ion()
|
||||
|
||||
# Declare the 3D translational standard deviations of the prior factor's Gaussian model, in meters.
|
||||
prior_xyz_sigma = 0.3
|
||||
|
||||
# Declare the 3D rotational standard deviations of the prior factor's Gaussian model, in degrees.
|
||||
prior_rpy_sigma = 5
|
||||
|
||||
# Declare the 3D translational standard deviations of the odometry factor's Gaussian model, in meters.
|
||||
odometry_xyz_sigma = 0.2
|
||||
|
||||
# Declare the 3D rotational standard deviations of the odometry factor's Gaussian model, in degrees.
|
||||
odometry_rpy_sigma = 5
|
||||
|
||||
# Although this example only uses linear measurements and Gaussian noise models, it is important
|
||||
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
|
||||
# simply showcases how iSAM2 may be applied to a Pose3 SLAM problem.
|
||||
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_rpy_sigma*np.pi/180,
|
||||
prior_rpy_sigma*np.pi/180,
|
||||
prior_rpy_sigma*np.pi/180,
|
||||
prior_xyz_sigma,
|
||||
prior_xyz_sigma,
|
||||
prior_xyz_sigma]))
|
||||
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_rpy_sigma*np.pi/180,
|
||||
odometry_rpy_sigma*np.pi/180,
|
||||
odometry_rpy_sigma*np.pi/180,
|
||||
odometry_xyz_sigma,
|
||||
odometry_xyz_sigma,
|
||||
odometry_xyz_sigma]))
|
||||
|
||||
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
initial_estimate = gtsam.Values()
|
||||
|
||||
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
|
||||
# update calls are required to perform the relinearization.
|
||||
parameters = gtsam.ISAM2Params()
|
||||
parameters.setRelinearizeThreshold(0.1)
|
||||
parameters.setRelinearizeSkip(1)
|
||||
isam = gtsam.ISAM2(parameters)
|
||||
|
||||
# Create the ground truth poses of the robot trajectory.
|
||||
true_poses = create_poses()
|
||||
|
||||
# Create the ground truth odometry transformations, xyz translations, and roll-pitch-yaw rotations
|
||||
# between each robot pose in the trajectory.
|
||||
odometry_tf = [true_poses[i-1].transformPoseTo(true_poses[i]) for i in range(1, len(true_poses))]
|
||||
odometry_xyz = [(odometry_tf[i].x(), odometry_tf[i].y(), odometry_tf[i].z()) for i in range(len(odometry_tf))]
|
||||
odometry_rpy = [odometry_tf[i].rotation().rpy() for i in range(len(odometry_tf))]
|
||||
|
||||
# Corrupt xyz translations and roll-pitch-yaw rotations with gaussian noise to create noisy odometry measurements.
|
||||
noisy_measurements = [np.random.multivariate_normal(np.hstack((odometry_rpy[i],odometry_xyz[i])), \
|
||||
ODOMETRY_NOISE.covariance()) for i in range(len(odometry_tf))]
|
||||
|
||||
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
|
||||
# iSAM2 incremental optimization.
|
||||
graph.push_back(gtsam.PriorFactorPose3(1, true_poses[0], PRIOR_NOISE))
|
||||
initial_estimate.insert(1, true_poses[0].compose(gtsam.Pose3(
|
||||
gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))
|
||||
|
||||
# Initialize the current estimate which is used during the incremental inference loop.
|
||||
current_estimate = initial_estimate
|
||||
for i in range(len(odometry_tf)):
|
||||
|
||||
# Obtain the noisy translation and rotation that is received by the robot and corrupted by gaussian noise.
|
||||
noisy_odometry = noisy_measurements[i]
|
||||
|
||||
# Compute the noisy odometry transformation according to the xyz translation and roll-pitch-yaw rotation.
|
||||
noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy_odometry[:3]), noisy_odometry[3:6].reshape(-1,1))
|
||||
|
||||
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
|
||||
loop = determine_loop_closure(noisy_tf, current_estimate, i, xyz_tol=18, rot_tol=30)
|
||||
|
||||
# Add a binary factor in between two existing states if loop closure is detected.
|
||||
# Otherwise, add a binary factor between a newly observed state and the previous state.
|
||||
if loop:
|
||||
graph.push_back(gtsam.BetweenFactorPose3(i + 1, loop, noisy_tf, ODOMETRY_NOISE))
|
||||
else:
|
||||
graph.push_back(gtsam.BetweenFactorPose3(i + 1, i + 2, noisy_tf, ODOMETRY_NOISE))
|
||||
|
||||
# Compute and insert the initialization estimate for the current pose using a noisy odometry measurement.
|
||||
noisy_estimate = current_estimate.atPose3(i + 1).compose(noisy_tf)
|
||||
initial_estimate.insert(i + 2, noisy_estimate)
|
||||
|
||||
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
|
||||
isam.update(graph, initial_estimate)
|
||||
current_estimate = isam.calculateEstimate()
|
||||
|
||||
# Report all current state estimates from the iSAM2 optimization.
|
||||
report_on_progress(graph, current_estimate, i)
|
||||
initial_estimate.clear()
|
||||
|
||||
# Print the final covariance matrix for each pose after completing inference.
|
||||
marginals = gtsam.Marginals(graph, current_estimate)
|
||||
i = 1
|
||||
while current_estimate.exists(i):
|
||||
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
|
||||
i += 1
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
if __name__ == '__main__':
|
||||
Pose3_ISAM2_example()
|
|
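One step that is easy to miss in the example above is how a noisy Pose3 odometry measurement is sampled from the roll-pitch-yaw/xyz noise model. A standalone sketch of just that step, with an illustrative true transform:

import numpy as np
import gtsam

# Sigmas: roll-pitch-yaw in radians, then xyz in meters (illustrative values).
sigmas = np.array([5 * np.pi / 180] * 3 + [0.2] * 3)
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(sigmas)

# Illustrative ground-truth relative transform between consecutive poses.
true_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(0.0, 0.0, np.pi / 2),
                      gtsam.Point3(2, 0, 0))
rpy_xyz = np.hstack((true_tf.rotation().rpy(),
                     (true_tf.x(), true_tf.y(), true_tf.z())))

# Sample a noisy measurement and rebuild it as a Pose3, as the loop above does.
noisy = np.random.multivariate_normal(rpy_xyz, ODOMETRY_NOISE.covariance())
noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy[:3]), noisy[3:6].reshape(-1, 1))
print(noisy_tf)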
@ -8,13 +8,14 @@
|
|||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from gtsam.utils import plot
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
|
||||
def vector6(x, y, z, a, b, c):
|
||||
|
@ -22,50 +23,62 @@ def vector6(x, y, z, a, b, c):
|
|||
return np.array([x, y, z, a, b, c], dtype=float)
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A 3D Pose SLAM example that reads input from g2o, and "
|
||||
"initializes Pose3")
|
||||
parser.add_argument('-i', '--input', help='input file g2o format')
|
||||
parser.add_argument('-o', '--output',
|
||||
help="the path to the output file with optimized graph")
|
||||
parser.add_argument("-p", "--plot", action="store_true",
|
||||
help="Flag to plot results")
|
||||
args = parser.parse_args()
|
||||
def main():
|
||||
"""Main runner."""
|
||||
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \
|
||||
else args.input
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A 3D Pose SLAM example that reads input from g2o, and "
|
||||
"initializes Pose3")
|
||||
parser.add_argument('-i', '--input', help='input file g2o format')
|
||||
parser.add_argument(
|
||||
'-o',
|
||||
'--output',
|
||||
help="the path to the output file with optimized graph")
|
||||
parser.add_argument("-p",
|
||||
"--plot",
|
||||
action="store_true",
|
||||
help="Flag to plot results")
|
||||
args = parser.parse_args()
|
||||
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \
|
||||
else args.input
|
||||
|
||||
# Add Prior on the first key
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6,
|
||||
1e-4, 1e-4, 1e-4))
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
|
||||
print("Adding prior to g2o file ")
|
||||
firstKey = initial.keys()[0]
|
||||
graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
|
||||
# Add Prior on the first key
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(
|
||||
vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4))
|
||||
|
||||
params = gtsam.GaussNewtonParams()
|
||||
params.setVerbosity("Termination") # this will show info about stopping conds
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("Optimization complete")
|
||||
print("Adding prior to g2o file ")
|
||||
firstKey = initial.keys()[0]
|
||||
graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
|
||||
|
||||
print("initial error = ", graph.error(initial))
|
||||
print("final error = ", graph.error(result))
|
||||
params = gtsam.GaussNewtonParams()
|
||||
params.setVerbosity(
|
||||
"Termination") # this will show info about stopping conds
|
||||
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("Optimization complete")
|
||||
|
||||
if args.output is None:
|
||||
print("Final Result:\n{}".format(result))
|
||||
else:
|
||||
outputFile = args.output
|
||||
print("Writing results to file: ", outputFile)
|
||||
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
|
||||
gtsam.writeG2o(graphNoKernel, result, outputFile)
|
||||
print ("Done!")
|
||||
print("initial error = ", graph.error(initial))
|
||||
print("final error = ", graph.error(result))
|
||||
|
||||
if args.plot:
|
||||
resultPoses = gtsam.utilities.allPose3s(result)
|
||||
for i in range(resultPoses.size()):
|
||||
plot.plot_pose3(1, resultPoses.atPose3(i))
|
||||
plt.show()
|
||||
if args.output is None:
|
||||
print("Final Result:\n{}".format(result))
|
||||
else:
|
||||
outputFile = args.output
|
||||
print("Writing results to file: ", outputFile)
|
||||
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
|
||||
gtsam.writeG2o(graphNoKernel, result, outputFile)
|
||||
print("Done!")
|
||||
|
||||
if args.plot:
|
||||
resultPoses = gtsam.utilities.allPose3s(result)
|
||||
for i in range(resultPoses.size()):
|
||||
plot.plot_pose3(1, resultPoses.atPose3(i))
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -13,23 +13,29 @@ Author: Luca Carlone, Frank Dellaert (python port)
|
|||
|
||||
from __future__ import print_function
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
# Read graph from file
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example.txt")
|
||||
def main():
|
||||
"""Main runner."""
|
||||
# Read graph from file
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example.txt")
|
||||
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
|
||||
# Add prior on the first key. TODO: assumes first key is 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(
|
||||
np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
|
||||
firstKey = initial.keys()[0]
|
||||
graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel))
|
||||
# Add prior on the first key. TODO: assumes first key is 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(
|
||||
np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
|
||||
firstKey = initial.keys()[0]
|
||||
graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel))
|
||||
|
||||
# Initializing Pose3 - chordal relaxation"
|
||||
initialization = gtsam.InitializePose3.initialize(graph)
|
||||
# Initializing Pose3 - chordal relaxation
|
||||
initialization = gtsam.InitializePose3.initialize(graph)
|
||||
|
||||
print(initialization)
|
||||
print(initialization)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
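A condensed sketch of the chordal-relaxation initialization performed by main() above, assuming the same example file; the resulting Values would typically be handed to Gauss-Newton or Levenberg-Marquardt for refinement.

import numpy as np
import gtsam

g2oFile = gtsam.findExampleDataFile("pose3example.txt")
graph, initial = gtsam.readG2o(g2oFile, True)  # is3D = True

# Pin key 0 with a tight prior before initializing.
priorModel = gtsam.noiseModel.Diagonal.Variances(
    np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel))

# Chordal relaxation: a linear approximation that yields a good initial guess.
initialization = gtsam.InitializePose3.initialize(graph)
print(initialization)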
@ -5,10 +5,14 @@ All Rights Reserved
|
|||
|
||||
See LICENSE for the license information
|
||||
|
||||
A script validating the Preintegration of IMU measurements
|
||||
A script validating the Preintegration of IMU measurements.
|
||||
|
||||
Authors: Frank Dellaert, Varun Agrawal.
|
||||
"""
|
||||
|
||||
import math
|
||||
# pylint: disable=invalid-name,unused-import,wrong-import-order
|
||||
|
||||
from typing import Optional, Sequence
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
|
@ -18,25 +22,28 @@ from mpl_toolkits.mplot3d import Axes3D
|
|||
|
||||
IMU_FIG = 1
|
||||
POSES_FIG = 2
|
||||
GRAVITY = 10
|
||||
|
||||
|
||||
class PreintegrationExample(object):
|
||||
|
||||
class PreintegrationExample:
|
||||
"""Base class for all preintegration examples."""
|
||||
@staticmethod
|
||||
def defaultParams(g):
|
||||
def defaultParams(g: float):
|
||||
"""Create default parameters with Z *up* and realistic noise parameters"""
|
||||
params = gtsam.PreintegrationParams.MakeSharedU(g)
|
||||
kGyroSigma = math.radians(0.5) / 60 # 0.5 degree ARW
|
||||
kGyroSigma = np.radians(0.5) / 60 # 0.5 degree ARW
|
||||
kAccelSigma = 0.1 / 60 # 10 cm VRW
|
||||
params.setGyroscopeCovariance(
|
||||
kGyroSigma ** 2 * np.identity(3, float))
|
||||
params.setAccelerometerCovariance(
|
||||
kAccelSigma ** 2 * np.identity(3, float))
|
||||
params.setIntegrationCovariance(
|
||||
0.0000001 ** 2 * np.identity(3, float))
|
||||
params.setGyroscopeCovariance(kGyroSigma**2 * np.identity(3, float))
|
||||
params.setAccelerometerCovariance(kAccelSigma**2 *
|
||||
np.identity(3, float))
|
||||
params.setIntegrationCovariance(0.0000001**2 * np.identity(3, float))
|
||||
return params
|
||||
|
||||
def __init__(self, twist=None, bias=None, dt=1e-2):
|
||||
def __init__(self,
|
||||
twist: Optional[np.ndarray] = None,
|
||||
bias: Optional[gtsam.imuBias.ConstantBias] = None,
|
||||
params: Optional[gtsam.PreintegrationParams] = None,
|
||||
dt: float = 1e-2):
|
||||
"""Initialize with given twist, a pair(angularVelocityVector, velocityVector)."""
|
||||
|
||||
# setup interactive plotting
|
||||
|
@ -48,7 +55,7 @@ class PreintegrationExample(object):
|
|||
else:
|
||||
# default = loop with forward velocity 2m/s, while pitching up
|
||||
# with angular velocity 30 degree/sec (negative in FLU)
|
||||
W = np.array([0, -math.radians(30), 0])
|
||||
W = np.array([0, -np.radians(30), 0])
|
||||
V = np.array([2, 0, 0])
|
||||
|
||||
self.scenario = gtsam.ConstantTwistScenario(W, V)
|
||||
|
@ -58,9 +65,11 @@ class PreintegrationExample(object):
|
|||
self.labels = list('xyz')
|
||||
self.colors = list('rgb')
|
||||
|
||||
# Create runner
|
||||
self.g = 10 # simple gravity constant
|
||||
self.params = self.defaultParams(self.g)
|
||||
if params:
|
||||
self.params = params
|
||||
else:
|
||||
# Default params with simple gravity constant
|
||||
self.params = self.defaultParams(g=GRAVITY)
|
||||
|
||||
if bias is not None:
|
||||
self.actualBias = bias
|
||||
|
@ -69,13 +78,22 @@ class PreintegrationExample(object):
|
|||
gyroBias = np.array([0, 0, 0])
|
||||
self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias)
|
||||
|
||||
self.runner = gtsam.ScenarioRunner(
|
||||
self.scenario, self.params, self.dt, self.actualBias)
|
||||
# Create runner
|
||||
self.runner = gtsam.ScenarioRunner(self.scenario, self.params, self.dt,
|
||||
self.actualBias)
|
||||
|
||||
fig, self.axes = plt.subplots(4, 3)
|
||||
fig.set_tight_layout(True)
|
||||
|
||||
def plotImu(self, t, measuredOmega, measuredAcc):
|
||||
def plotImu(self, t: float, measuredOmega: Sequence,
|
||||
measuredAcc: Sequence):
|
||||
"""
|
||||
Plot IMU measurements.
|
||||
Args:
|
||||
t: The time at which the measurement was recorded.
|
||||
measuredOmega: Measured angular velocity.
|
||||
measuredAcc: Measured linear acceleration.
|
||||
"""
|
||||
plt.figure(IMU_FIG)
|
||||
|
||||
# plot angular velocity
|
||||
|
@ -108,12 +126,21 @@ class PreintegrationExample(object):
|
|||
ax.scatter(t, measuredAcc[i], color=color, marker='.')
|
||||
ax.set_xlabel('specific force ' + label)
|
||||
|
||||
def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01):
|
||||
# plot ground truth pose, as well as prediction from integrated IMU measurements
|
||||
def plotGroundTruthPose(self,
|
||||
t: float,
|
||||
scale: float = 0.3,
|
||||
time_interval: float = 0.01):
|
||||
"""
|
||||
Plot ground truth pose, as well as prediction from integrated IMU measurements.
|
||||
Args:
|
||||
t: Time at which the pose was obtained.
|
||||
scale: The scaling factor for the pose axes.
|
||||
time_interval: The time to wait before showing the plot.
|
||||
"""
|
||||
actualPose = self.scenario.pose(t)
|
||||
plot_pose3(POSES_FIG, actualPose, scale)
|
||||
t = actualPose.translation()
|
||||
self.maxDim = max([max(np.abs(t)), self.maxDim])
|
||||
translation = actualPose.translation()
|
||||
self.maxDim = max([max(np.abs(translation)), self.maxDim])
|
||||
ax = plt.gca()
|
||||
ax.set_xlim3d(-self.maxDim, self.maxDim)
|
||||
ax.set_ylim3d(-self.maxDim, self.maxDim)
|
||||
|
@ -121,8 +148,8 @@ class PreintegrationExample(object):
|
|||
|
||||
plt.pause(time_interval)
|
||||
|
||||
def run(self, T=12):
|
||||
# simulate the loop
|
||||
def run(self, T: int = 12):
|
||||
"""Simulate the loop."""
|
||||
for i, t in enumerate(np.arange(0, T, self.dt)):
|
||||
measuredOmega = self.runner.measuredAngularVelocity(t)
|
||||
measuredAcc = self.runner.measuredSpecificForce(t)
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
See LICENSE for the license information
|
||||
|
||||
Author: John Lambert (Python)
|
||||
"""
|
||||
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose2, PriorFactorPose2, Values
|
||||
|
||||
|
||||
class TestLago(unittest.TestCase):
|
||||
"""Test selected LAGO methods."""
|
||||
|
||||
def test_initialize(self) -> None:
|
||||
"""Smokescreen to ensure LAGO can be imported and run on toy data stored in a g2o file."""
|
||||
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt")
|
||||
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
graph, initial = gtsam.readG2o(g2oFile)
|
||||
|
||||
# Add prior on the pose having index (key) = 0
|
||||
priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8))
|
||||
graph.add(PriorFactorPose2(0, Pose2(), priorModel))
|
||||
|
||||
estimateLago: Values = gtsam.lago.initialize(graph)
|
||||
assert isinstance(estimateLago, Values)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -129,6 +129,46 @@ TEST( testProduct, Logmap ) {
|
|||
EXPECT(assert_equal(numericH, actH, tol));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Product interpolate_proxy(const Product& x, const Product& y, double t) {
|
||||
return interpolate<Product>(x, y, t);
|
||||
}
|
||||
|
||||
TEST(Lie, Interpolate) {
|
||||
Product x(Point2(1, 2), Pose2(3, 4, 5));
|
||||
Product y(Point2(6, 7), Pose2(8, 9, 0));
|
||||
|
||||
double t;
|
||||
Matrix actH1, numericH1, actH2, numericH2;
|
||||
|
||||
t = 0.0;
|
||||
interpolate<Product>(x, y, t, actH1, actH2);
|
||||
numericH1 = numericalDerivative31<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH1, actH1, tol));
|
||||
numericH2 = numericalDerivative32<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH2, actH2, tol));
|
||||
|
||||
t = 0.5;
|
||||
interpolate<Product>(x, y, t, actH1, actH2);
|
||||
numericH1 = numericalDerivative31<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH1, actH1, tol));
|
||||
numericH2 = numericalDerivative32<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH2, actH2, tol));
|
||||
|
||||
t = 1.0;
|
||||
interpolate<Product>(x, y, t, actH1, actH2);
|
||||
numericH1 = numericalDerivative31<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH1, actH1, tol));
|
||||
numericH2 = numericalDerivative32<Product, Product, Product, double>(
|
||||
interpolate_proxy, x, y, t);
|
||||
EXPECT(assert_equal(numericH2, actH2, tol));
|
||||
}
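For reference, the quantity whose Jacobians are checked above is Lie-group interpolation; a sketch of the definition used by gtsam::interpolate for Lie groups is interpolate(X, Y, t) = X * exp(t * log(X^-1 * Y)), so t = 0 returns X and t = 1 returns Y, and the test verifies the derivatives with respect to X and Y at t in {0, 0.5, 1}.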
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
@ -27,10 +27,12 @@ jobs:
|
|||
|
||||
- name: Python Dependencies
|
||||
run: |
|
||||
pip3 install -U pip setuptools
|
||||
pip3 install -r requirements.txt
|
||||
|
||||
- name: Build and Test
|
||||
run: |
|
||||
# Build
|
||||
cmake .
|
||||
cd tests
|
||||
# Use Pytest to run all the tests.
|
||||
|
|
|
@ -192,9 +192,9 @@ function(install_python_files source_files dest_directory)
|
|||
endfunction()
|
||||
|
||||
# ~~~
|
||||
# https://stackoverflow.com/questions/13959434/cmake-out-of-source-build-python-files
|
||||
# Copy over the directory from source_folder to dest_folder
|
||||
# ~~~
|
||||
function(create_symlinks source_folder dest_folder)
|
||||
function(copy_directory source_folder dest_folder)
|
||||
if(${source_folder} STREQUAL ${dest_folder})
|
||||
return()
|
||||
endif()
|
||||
|
@ -215,31 +215,13 @@ function(create_symlinks source_folder dest_folder)
|
|||
# Create REAL folder
|
||||
file(MAKE_DIRECTORY "${dest_folder}")
|
||||
|
||||
# Delete symlink if it exists
|
||||
# Delete if it exists
|
||||
file(REMOVE "${dest_folder}/${path_file}")
|
||||
|
||||
# Get OS dependent path to use in `execute_process`
|
||||
file(TO_NATIVE_PATH "${dest_folder}/${path_file}" link)
|
||||
# Get OS dependent path to use in copy
|
||||
file(TO_NATIVE_PATH "${source_folder}/${path_file}" target)
|
||||
|
||||
# cmake-format: off
|
||||
if(UNIX)
|
||||
set(command ln -s ${target} ${link})
|
||||
else()
|
||||
set(command cmd.exe /c mklink ${link} ${target})
|
||||
endif()
|
||||
# cmake-format: on
|
||||
|
||||
execute_process(
|
||||
COMMAND ${command}
|
||||
RESULT_VARIABLE result
|
||||
ERROR_VARIABLE output)
|
||||
|
||||
if(NOT ${result} EQUAL 0)
|
||||
message(
|
||||
FATAL_ERROR
|
||||
"Could not create symbolic link for: ${target} --> ${output}")
|
||||
endif()
|
||||
file(COPY ${target} DESTINATION ${dest_folder})
|
||||
|
||||
endforeach(path_file)
|
||||
endfunction(create_symlinks)
|
||||
endfunction(copy_directory)
|
||||
|
|
|
@ -62,6 +62,10 @@ class Method:
|
|||
|
||||
self.parent = parent
|
||||
|
||||
def to_cpp(self) -> str:
|
||||
"""Generate the C++ code for wrapping."""
|
||||
return self.name
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "Method: {} {} {}({}){}".format(
|
||||
self.template,
|
||||
|
@ -84,7 +88,8 @@ class StaticMethod:
|
|||
```
|
||||
"""
|
||||
rule = (
|
||||
STATIC #
|
||||
Optional(Template.rule("template")) #
|
||||
+ STATIC #
|
||||
+ ReturnType.rule("return_type") #
|
||||
+ IDENT("name") #
|
||||
+ LPAREN #
|
||||
|
@ -92,16 +97,18 @@ class StaticMethod:
|
|||
+ RPAREN #
|
||||
+ SEMI_COLON # BR
|
||||
).setParseAction(
|
||||
lambda t: StaticMethod(t.name, t.return_type, t.args_list))
|
||||
lambda t: StaticMethod(t.name, t.return_type, t.args_list, t.template))
|
||||
|
||||
def __init__(self,
|
||||
name: str,
|
||||
return_type: ReturnType,
|
||||
args: ArgumentList,
|
||||
template: Union[Template, Any] = None,
|
||||
parent: Union["Class", Any] = ''):
|
||||
self.name = name
|
||||
self.return_type = return_type
|
||||
self.args = args
|
||||
self.template = template
|
||||
|
||||
self.parent = parent
|
||||
|
||||
|
@ -221,8 +228,8 @@ class Class:
|
|||
Rule for all the members within a class.
|
||||
"""
|
||||
rule = ZeroOrMore(Constructor.rule #
|
||||
^ StaticMethod.rule #
|
||||
^ Method.rule #
|
||||
^ StaticMethod.rule #
|
||||
^ Variable.rule #
|
||||
^ Operator.rule #
|
||||
^ Enum.rule #
|
||||
|
|
|
@ -158,6 +158,8 @@ class Type:
|
|||
"""
|
||||
Parsed datatype, can be either a fundamental type or a custom datatype.
|
||||
E.g. void, double, size_t, Matrix.
|
||||
Think of this as a high-level type which encodes the typename and other
|
||||
characteristics of the type.
|
||||
|
||||
The type can optionally be a raw pointer, shared pointer or reference.
|
||||
Can also be optionally qualified with a `const`, e.g. `const int`.
|
||||
|
@ -240,6 +242,9 @@ class Type:
|
|||
or self.typename.name in ["Matrix", "Vector"]) else "",
|
||||
typename=typename))
|
||||
|
||||
def get_typename(self):
|
||||
"""Convenience method to get the typename of this type."""
|
||||
return self.typename.name
|
||||
|
||||
class TemplatedType:
|
||||
"""
|
||||
|
|
|
@ -112,7 +112,7 @@ class FormatMixin:
|
|||
|
||||
if separator == "::": # C++
|
||||
templates = []
|
||||
for idx in range(len(type_name.instantiations)):
|
||||
for idx, _ in enumerate(type_name.instantiations):
|
||||
template = '{}'.format(
|
||||
self._format_type_name(type_name.instantiations[idx],
|
||||
include_namespace=include_namespace,
|
||||
|
@ -124,7 +124,7 @@ class FormatMixin:
|
|||
formatted_type_name += '<{}>'.format(','.join(templates))
|
||||
|
||||
else:
|
||||
for idx in range(len(type_name.instantiations)):
|
||||
for idx, _ in enumerate(type_name.instantiations):
|
||||
formatted_type_name += '{}'.format(
|
||||
self._format_type_name(type_name.instantiations[idx],
|
||||
separator=separator,
|
||||
|
|
|
@ -119,8 +119,11 @@ class PybindWrapper:
|
|||
if py_method in self.python_keywords:
|
||||
py_method = py_method + "_"
|
||||
|
||||
is_method = isinstance(method, instantiator.InstantiatedMethod)
|
||||
is_static = isinstance(method, parser.StaticMethod)
|
||||
is_method = isinstance(
|
||||
method, (parser.Method, instantiator.InstantiatedMethod))
|
||||
is_static = isinstance(
|
||||
method,
|
||||
(parser.StaticMethod, instantiator.InstantiatedStaticMethod))
|
||||
return_void = method.return_type.is_void()
|
||||
args_names = method.args.names()
|
||||
py_args_names = self._py_args_names(method.args)
|
||||
|
|
|
@ -1,714 +0,0 @@
|
|||
"""Code to help instantiate templated classes, methods and functions."""
|
||||
|
||||
# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, too-many-arguments, unused-format-string-argument, unused-variable
|
||||
|
||||
import itertools
|
||||
from copy import deepcopy
|
||||
from typing import Any, Iterable, List, Sequence
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
|
||||
|
||||
def is_scoped_template(template_typenames, str_arg_typename):
|
||||
"""
|
||||
Check if the template given by `str_arg_typename` is a scoped template,
|
||||
and if so, return what template and index matches the scoped template correctly.
|
||||
"""
|
||||
for idx, template in enumerate(template_typenames):
|
||||
if template in str_arg_typename.split("::"):
|
||||
return template, idx
|
||||
return False, -1
|
||||
|
||||
|
||||
def instantiate_type(ctype: parser.Type,
|
||||
template_typenames: List[str],
|
||||
instantiations: List[parser.Typename],
|
||||
cpp_typename: parser.Typename,
|
||||
instantiated_class=None):
|
||||
"""
|
||||
Instantiate template typename for @p ctype.
|
||||
|
||||
Args:
|
||||
instantiated_class (InstantiatedClass):
|
||||
|
||||
@return If ctype's name is in the @p template_typenames, return the
|
||||
corresponding type to replace in @p instantiations.
|
||||
If ctype name is `This`, return the new typename @p `cpp_typename`.
|
||||
Otherwise, return the original ctype.
|
||||
"""
|
||||
# make a deep copy so that there is no overwriting of original template params
|
||||
ctype = deepcopy(ctype)
|
||||
|
||||
# Check if the return type has template parameters
|
||||
if ctype.typename.instantiations:
|
||||
for idx, instantiation in enumerate(ctype.typename.instantiations):
|
||||
if instantiation.name in template_typenames:
|
||||
template_idx = template_typenames.index(instantiation.name)
|
||||
ctype.typename.instantiations[
|
||||
idx] = instantiations[ # type: ignore
|
||||
template_idx]
|
||||
|
||||
return ctype
|
||||
|
||||
str_arg_typename = str(ctype.typename)
|
||||
|
||||
# Check if template is a scoped template e.g. T::Value where T is the template
|
||||
scoped_template, scoped_idx = is_scoped_template(template_typenames,
|
||||
str_arg_typename)
|
||||
|
||||
# Instantiate templates which have enumerated instantiations in the template.
|
||||
# E.g. `template<T={double}>`.
|
||||
|
||||
# Instantiate scoped templates, e.g. T::Value.
|
||||
if scoped_template:
|
||||
# Create a copy of the instantiation so we can modify it.
|
||||
instantiation = deepcopy(instantiations[scoped_idx])
|
||||
# Replace the part of the template with the instantiation
|
||||
instantiation.name = str_arg_typename.replace(scoped_template,
|
||||
instantiation.name)
|
||||
return parser.Type(
|
||||
typename=instantiation,
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
# Check for exact template match.
|
||||
elif str_arg_typename in template_typenames:
|
||||
idx = template_typenames.index(str_arg_typename)
|
||||
return parser.Type(
|
||||
typename=instantiations[idx],
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
|
||||
# If a method has the keyword `This`, we replace it with the (instantiated) class.
|
||||
elif str_arg_typename == 'This':
|
||||
# Check if the class is template instantiated
|
||||
# so we can replace it with the instantiated version.
|
||||
if instantiated_class:
|
||||
name = instantiated_class.original.name
|
||||
namespaces_name = instantiated_class.namespaces()
|
||||
namespaces_name.append(name)
|
||||
cpp_typename = parser.Typename(
|
||||
namespaces_name,
|
||||
instantiations=instantiated_class.instantiations)
|
||||
|
||||
return parser.Type(
|
||||
typename=cpp_typename,
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
|
||||
# Case when 'This' is present in the type namespace, e.g `This::Subclass`.
|
||||
elif 'This' in str_arg_typename:
|
||||
# Simply get the index of `This` in the namespace and replace it with the instantiated name.
|
||||
namespace_idx = ctype.typename.namespaces.index('This')
|
||||
ctype.typename.namespaces[namespace_idx] = cpp_typename.name
|
||||
return ctype
|
||||
|
||||
else:
|
||||
return ctype
|
||||
|
||||
|
||||
def instantiate_args_list(args_list, template_typenames, instantiations,
|
||||
cpp_typename):
|
||||
"""
|
||||
Instantiate template typenames in an argument list.
|
||||
Type with name `This` will be replaced by @p `cpp_typename`.
|
||||
|
||||
@param[in] args_list A list of `parser.Argument` to instantiate.
|
||||
@param[in] template_typenames List of template typenames to instantiate,
|
||||
e.g. ['T', 'U', 'V'].
|
||||
@param[in] instantiations List of specific types to instantiate, each
|
||||
associated with each template typename. Each type is a parser.Typename,
|
||||
including its name and full namespaces.
|
||||
@param[in] cpp_typename Full-namespace cpp class name of this instantiation
|
||||
to replace for arguments of type named `This`.
|
||||
@return A new list of parser.Argument which types are replaced with their
|
||||
instantiations.
|
||||
"""
|
||||
instantiated_args = []
|
||||
for arg in args_list:
|
||||
new_type = instantiate_type(arg.ctype, template_typenames,
|
||||
instantiations, cpp_typename)
|
||||
instantiated_args.append(
|
||||
parser.Argument(name=arg.name, ctype=new_type,
|
||||
default=arg.default))
|
||||
return instantiated_args
|
||||
|
||||
|
||||
def instantiate_return_type(return_type,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename,
|
||||
instantiated_class=None):
|
||||
"""Instantiate the return type."""
|
||||
new_type1 = instantiate_type(return_type.type1,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename,
|
||||
instantiated_class=instantiated_class)
|
||||
if return_type.type2:
|
||||
new_type2 = instantiate_type(return_type.type2,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename,
|
||||
instantiated_class=instantiated_class)
|
||||
else:
|
||||
new_type2 = ''
|
||||
return parser.ReturnType(new_type1, new_type2)
|
||||
|
||||
|
||||
def instantiate_name(original_name, instantiations):
|
||||
"""
|
||||
Concatenate instantiated types with an @p original name to form a new
|
||||
instantiated name.
|
||||
TODO(duy): To avoid conflicts, we should include the instantiation's
|
||||
namespaces, but I find that too verbose.
|
||||
"""
|
||||
instantiated_names = []
|
||||
for inst in instantiations:
|
||||
# Ensure the first character of the type is capitalized
|
||||
name = inst.instantiated_name()
|
||||
# Using `capitalize` on the complete name causes other caps to be lower case
|
||||
instantiated_names.append(name.replace(name[0], name[0].capitalize()))
|
||||
|
||||
return "{}{}".format(original_name, "".join(instantiated_names))
|
||||
|
||||
|
||||
class InstantiatedGlobalFunction(parser.GlobalFunction):
|
||||
"""
|
||||
Instantiate global functions.
|
||||
|
||||
E.g.
|
||||
template<T = {double}>
|
||||
T add(const T& x, const T& y);
|
||||
"""
|
||||
def __init__(self, original, instantiations=(), new_name=''):
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.template = ''
|
||||
self.parent = original.parent
|
||||
|
||||
if not original.template:
|
||||
self.name = original.name
|
||||
self.return_type = original.return_type
|
||||
self.args = original.args
|
||||
else:
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
self.return_type = instantiate_return_type(
|
||||
original.return_type,
|
||||
self.original.template.typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
instantiated_args = instantiate_args_list(
|
||||
original.args.list(),
|
||||
self.original.template.typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
self.args = parser.ArgumentList(instantiated_args)
|
||||
|
||||
super().__init__(self.name,
|
||||
self.return_type,
|
||||
self.args,
|
||||
self.template,
|
||||
parent=self.parent)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
if self.original.template:
|
||||
instantiated_names = [
|
||||
inst.instantiated_name() for inst in self.instantiations
|
||||
]
|
||||
ret = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiated_names))
|
||||
else:
|
||||
ret = self.original.name
|
||||
return ret
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(
|
||||
super(InstantiatedGlobalFunction, self).__repr__())
|
||||
|
||||
|
||||
class InstantiatedMethod(parser.Method):
|
||||
"""
|
||||
Instantiate method with template parameters.
|
||||
|
||||
E.g.
|
||||
class A {
|
||||
template<X, Y>
|
||||
void func(X x, Y y);
|
||||
}
|
||||
"""
|
||||
def __init__(self,
|
||||
original: parser.Method,
|
||||
instantiations: Iterable[parser.Typename] = ()):
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.template: Any = ''
|
||||
self.is_const = original.is_const
|
||||
self.parent = original.parent
|
||||
|
||||
# Check for typenames if templated.
|
||||
# This way, we can gracefully handle both templated and non-templated methods.
|
||||
typenames: Sequence = self.original.template.typenames if self.original.template else []
|
||||
self.name = instantiate_name(original.name, self.instantiations)
|
||||
self.return_type = instantiate_return_type(
|
||||
original.return_type,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
|
||||
instantiated_args = instantiate_args_list(
|
||||
original.args.list(),
|
||||
typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
self.args = parser.ArgumentList(instantiated_args)
|
||||
|
||||
super().__init__(self.template,
|
||||
self.name,
|
||||
self.return_type,
|
||||
self.args,
|
||||
self.is_const,
|
||||
parent=self.parent)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
if self.original.template:
|
||||
# to_cpp will handle all the namespacing and templating
|
||||
instantiation_list = [x.to_cpp() for x in self.instantiations]
|
||||
# now can simply combine the instantiations, separated by commas
|
||||
ret = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiation_list))
|
||||
else:
|
||||
ret = self.original.name
|
||||
return ret
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(
|
||||
super(InstantiatedMethod, self).__repr__())
|
||||
|
||||
|
||||
class InstantiatedClass(parser.Class):
|
||||
"""
|
||||
Instantiate the class defined in the interface file.
|
||||
"""
|
||||
def __init__(self, original: parser.Class, instantiations=(), new_name=''):
|
||||
"""
|
||||
Template <T, U>
|
||||
Instantiations: [T1, U1]
|
||||
"""
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
|
||||
self.template = None
|
||||
self.is_virtual = original.is_virtual
|
||||
self.parent_class = original.parent_class
|
||||
self.parent = original.parent
|
||||
|
||||
# If the class is templated, check if the number of provided instantiations
|
||||
# match the number of templates, else it's only a partial instantiation which is bad.
|
||||
if original.template:
|
||||
assert len(original.template.typenames) == len(
|
||||
instantiations), "Typenames and instantiations mismatch!"
|
||||
|
||||
# Get the instantiated name of the class. E.g. FuncDouble
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
|
||||
# Check for typenames if templated.
|
||||
# By passing in typenames, we can gracefully handle both templated and non-templated classes
|
||||
# This will allow the `This` keyword to be used in both templated and non-templated classes.
|
||||
typenames = self.original.template.typenames if self.original.template else []
|
||||
|
||||
# Instantiate the constructors, static methods, properties, respectively.
|
||||
self.ctors = self.instantiate_ctors(typenames)
|
||||
self.static_methods = self.instantiate_static_methods(typenames)
|
||||
self.properties = self.instantiate_properties(typenames)
|
||||
|
||||
# Instantiate all operator overloads
|
||||
self.operators = self.instantiate_operators(typenames)
|
||||
|
||||
# Set enums
|
||||
self.enums = original.enums
|
||||
|
||||
# Instantiate all instance methods
|
||||
instantiated_methods = \
|
||||
self.instantiate_class_templates_in_methods(typenames)
|
||||
|
||||
# Second instantiation round to instantiate templated methods.
|
||||
# This is done in case both the class and the method are templated.
|
||||
self.methods = []
|
||||
for method in instantiated_methods:
|
||||
if not method.template:
|
||||
self.methods.append(InstantiatedMethod(method, ()))
|
||||
else:
|
||||
instantiations = []
|
||||
# Get all combinations of template parameters
|
||||
for instantiations in itertools.product(
|
||||
*method.template.instantiations):
|
||||
self.methods.append(
|
||||
InstantiatedMethod(method, instantiations))
|
||||
|
||||
super().__init__(
|
||||
self.template,
|
||||
self.is_virtual,
|
||||
self.name,
|
||||
[self.parent_class],
|
||||
self.ctors,
|
||||
self.methods,
|
||||
self.static_methods,
|
||||
self.properties,
|
||||
self.operators,
|
||||
self.enums,
|
||||
parent=self.parent,
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return "{virtual}Class {cpp_class} : {parent_class}\n"\
|
||||
"{ctors}\n{static_methods}\n{methods}\n{operators}".format(
|
||||
virtual="virtual " if self.is_virtual else '',
|
||||
cpp_class=self.to_cpp(),
|
||||
parent_class=self.parent,
|
||||
ctors="\n".join([repr(ctor) for ctor in self.ctors]),
|
||||
static_methods="\n".join([repr(m)
|
||||
for m in self.static_methods]),
|
||||
methods="\n".join([repr(m) for m in self.methods]),
|
||||
operators="\n".join([repr(op) for op in self.operators])
|
||||
)
|
||||
|
||||
def instantiate_ctors(self, typenames):
|
||||
"""
|
||||
Instantiate the class constructors.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of constructors instantiated with provided template args.
|
||||
"""
|
||||
instantiated_ctors = []
|
||||
|
||||
def instantiate(instantiated_ctors, ctor, typenames, instantiations):
|
||||
instantiated_args = instantiate_args_list(
|
||||
ctor.args.list(),
|
||||
typenames,
|
||||
instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
instantiated_ctors.append(
|
||||
parser.Constructor(
|
||||
name=self.name,
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
template=self.original.template,
|
||||
parent=self,
|
||||
))
|
||||
return instantiated_ctors
|
||||
|
||||
for ctor in self.original.ctors:
|
||||
# Add constructor templates to the typenames and instantiations
|
||||
if isinstance(ctor.template, parser.template.Template):
|
||||
typenames.extend(ctor.template.typenames)
|
||||
|
||||
# Get all combinations of template args
|
||||
for instantiations in itertools.product(
|
||||
*ctor.template.instantiations):
|
||||
instantiations = self.instantiations + list(instantiations)
|
||||
|
||||
instantiated_ctors = instantiate(
|
||||
instantiated_ctors,
|
||||
ctor,
|
||||
typenames=typenames,
|
||||
instantiations=instantiations)
|
||||
|
||||
else:
|
||||
# If no constructor level templates, just use the class templates
|
||||
instantiated_ctors = instantiate(
|
||||
instantiated_ctors,
|
||||
ctor,
|
||||
typenames=typenames,
|
||||
instantiations=self.instantiations)
|
||||
|
||||
return instantiated_ctors
|
||||
|
||||
def instantiate_static_methods(self, typenames):
|
||||
"""
|
||||
Instantiate static methods in the class.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of static methods instantiated with provided template args.
|
||||
"""
|
||||
instantiated_static_methods = []
|
||||
for static_method in self.original.static_methods:
|
||||
instantiated_args = instantiate_args_list(
|
||||
static_method.args.list(), typenames, self.instantiations,
|
||||
self.cpp_typename())
|
||||
instantiated_static_methods.append(
|
||||
parser.StaticMethod(
|
||||
name=static_method.name,
|
||||
return_type=instantiate_return_type(
|
||||
static_method.return_type,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
instantiated_class=self),
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
parent=self,
|
||||
))
|
||||
return instantiated_static_methods
|
||||
|
||||
def instantiate_class_templates_in_methods(self, typenames):
|
||||
"""
|
||||
This function only instantiates the class-level templates in the methods.
|
||||
Template methods are instantiated in InstantiatedMethod in the second
|
||||
round.
|
||||
|
||||
E.g.
|
||||
```
|
||||
template<T={string}>
|
||||
class Greeter{
|
||||
void sayHello(T& name);
|
||||
};
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of methods instantiated with provided template args on the class.
|
||||
"""
|
||||
class_instantiated_methods = []
|
||||
for method in self.original.methods:
|
||||
instantiated_args = instantiate_args_list(
|
||||
method.args.list(),
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
class_instantiated_methods.append(
|
||||
parser.Method(
|
||||
template=method.template,
|
||||
name=method.name,
|
||||
return_type=instantiate_return_type(
|
||||
method.return_type,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
),
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
is_const=method.is_const,
|
||||
parent=self,
|
||||
))
|
||||
return class_instantiated_methods
|
||||
|
||||
def instantiate_operators(self, typenames):
|
||||
"""
|
||||
Instantiate the class-level template in the operator overload.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of methods instantiated with provided template args on the class.
|
||||
"""
|
||||
instantiated_operators = []
|
||||
for operator in self.original.operators:
|
||||
instantiated_args = instantiate_args_list(
|
||||
operator.args.list(),
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
instantiated_operators.append(
|
||||
parser.Operator(
|
||||
name=operator.name,
|
||||
operator=operator.operator,
|
||||
return_type=instantiate_return_type(
|
||||
operator.return_type,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
),
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
is_const=operator.is_const,
|
||||
parent=self,
|
||||
))
|
||||
return instantiated_operators
|
||||
|
||||
def instantiate_properties(self, typenames):
|
||||
"""
|
||||
Instantiate the class properties.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of properties instantiated with provided template args.
|
||||
"""
|
||||
instantiated_properties = instantiate_args_list(
|
||||
self.original.properties,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
return instantiated_properties
|
||||
|
||||
def cpp_typename(self):
|
||||
"""
|
||||
Return a parser.Typename including namespaces and cpp name of this
|
||||
class.
|
||||
"""
|
||||
if self.original.template:
|
||||
name = "{}<{}>".format(
|
||||
self.original.name,
|
||||
", ".join([inst.to_cpp() for inst in self.instantiations]))
|
||||
else:
|
||||
name = self.original.name
|
||||
namespaces_name = self.namespaces()
|
||||
namespaces_name.append(name)
|
||||
return parser.Typename(namespaces_name)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
return self.cpp_typename().to_cpp()
|
||||
|
||||
|
||||
class InstantiatedDeclaration(parser.ForwardDeclaration):
|
||||
"""
|
||||
Instantiate typedefs of forward declarations.
|
||||
This is useful when we wish to typedef a templated class
|
||||
which is not defined in the current project.
|
||||
|
||||
E.g.
|
||||
class FactorFromAnotherMother;
|
||||
|
||||
typedef FactorFromAnotherMother<gtsam::Pose3> FactorWeCanUse;
|
||||
"""
|
||||
def __init__(self, original, instantiations=(), new_name=''):
|
||||
super().__init__(original.typename,
|
||||
original.parent_type,
|
||||
original.is_virtual,
|
||||
parent=original.parent)
|
||||
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.parent = original.parent
|
||||
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
instantiated_names = [
|
||||
inst.qualified_name() for inst in self.instantiations
|
||||
]
|
||||
name = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiated_names))
|
||||
namespaces_name = self.namespaces()
|
||||
namespaces_name.append(name)
|
||||
# Leverage Typename to generate the fully qualified C++ name
|
||||
return parser.Typename(namespaces_name).to_cpp()
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(
|
||||
super(InstantiatedDeclaration, self).__repr__())
|
||||
|
||||
|
||||
def instantiate_namespace(namespace):
|
||||
"""
|
||||
Instantiate the classes and other elements in the `namespace` content and
|
||||
assign it back to the namespace content attribute.
|
||||
|
||||
@param[in/out] namespace The namespace whose content will be replaced with
|
||||
the instantiated content.
|
||||
"""
|
||||
instantiated_content = []
|
||||
typedef_content = []
|
||||
|
||||
for element in namespace.content:
|
||||
if isinstance(element, parser.Class):
|
||||
original_class = element
|
||||
if not original_class.template:
|
||||
instantiated_content.append(
|
||||
InstantiatedClass(original_class, []))
|
||||
else:
|
||||
# This case is for when the templates have enumerated instantiations.
|
||||
|
||||
# Use itertools to get all possible combinations of instantiations
|
||||
# Works even if one template does not have an instantiation list
|
||||
for instantiations in itertools.product(
|
||||
*original_class.template.instantiations):
|
||||
instantiated_content.append(
|
||||
InstantiatedClass(original_class,
|
||||
list(instantiations)))
|
||||
|
||||
elif isinstance(element, parser.GlobalFunction):
|
||||
original_func = element
|
||||
if not original_func.template:
|
||||
instantiated_content.append(
|
||||
InstantiatedGlobalFunction(original_func, []))
|
||||
else:
|
||||
# Use itertools to get all possible combinations of instantiations
|
||||
# Works even if one template does not have an instantiation list
|
||||
for instantiations in itertools.product(
|
||||
*original_func.template.instantiations):
|
||||
instantiated_content.append(
|
||||
InstantiatedGlobalFunction(original_func,
|
||||
list(instantiations)))
|
||||
|
||||
elif isinstance(element, parser.TypedefTemplateInstantiation):
|
||||
# This is for the case where `typedef` statements are used
|
||||
# to specify the template parameters.
|
||||
typedef_inst = element
|
||||
top_level = namespace.top_level()
|
||||
original_element = top_level.find_class_or_function(
|
||||
typedef_inst.typename)
|
||||
|
||||
# Check if element is a typedef'd class, function or
|
||||
# forward declaration from another project.
|
||||
if isinstance(original_element, parser.Class):
|
||||
typedef_content.append(
|
||||
InstantiatedClass(original_element,
|
||||
typedef_inst.typename.instantiations,
|
||||
typedef_inst.new_name))
|
||||
elif isinstance(original_element, parser.GlobalFunction):
|
||||
typedef_content.append(
|
||||
InstantiatedGlobalFunction(
|
||||
original_element, typedef_inst.typename.instantiations,
|
||||
typedef_inst.new_name))
|
||||
elif isinstance(original_element, parser.ForwardDeclaration):
|
||||
typedef_content.append(
|
||||
InstantiatedDeclaration(
|
||||
original_element, typedef_inst.typename.instantiations,
|
||||
typedef_inst.new_name))
|
||||
|
||||
elif isinstance(element, parser.Namespace):
|
||||
element = instantiate_namespace(element)
|
||||
instantiated_content.append(element)
|
||||
else:
|
||||
instantiated_content.append(element)
|
||||
|
||||
instantiated_content.extend(typedef_content)
|
||||
namespace.content = instantiated_content
|
||||
|
||||
return namespace
|
|
@ -0,0 +1,14 @@
|
|||
"""Code to help instantiate templated classes, methods and functions."""
|
||||
|
||||
# pylint: disable=too-many-arguments, too-many-instance-attributes, no-self-use, no-else-return, unused-format-string-argument, unused-variable, unused-argument, too-many-branches
|
||||
|
||||
from typing import Iterable, Sequence, Union
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
from gtwrap.template_instantiator.classes import *
|
||||
from gtwrap.template_instantiator.constructor import *
|
||||
from gtwrap.template_instantiator.declaration import *
|
||||
from gtwrap.template_instantiator.function import *
|
||||
from gtwrap.template_instantiator.helpers import *
|
||||
from gtwrap.template_instantiator.method import *
|
||||
from gtwrap.template_instantiator.namespace import *
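The package is expected to keep `instantiate_namespace` as the top-level entry point (re-exported here from `gtwrap.template_instantiator.namespace`); a minimal usage sketch, where `namespace` is a `parser.Namespace` produced by the interface parser:

# Replaces the namespace content in place with instantiated classes/functions.
namespace = instantiate_namespace(namespace)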
|
|
@ -0,0 +1,206 @@
|
|||
"""Instantiate a class and its members."""
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
from gtwrap.template_instantiator.constructor import InstantiatedConstructor
|
||||
from gtwrap.template_instantiator.helpers import (InstantiationHelper,
|
||||
instantiate_args_list,
|
||||
instantiate_name,
|
||||
instantiate_return_type)
|
||||
from gtwrap.template_instantiator.method import (InstantiatedMethod,
|
||||
InstantiatedStaticMethod)
|
||||
|
||||
|
||||
class InstantiatedClass(parser.Class):
|
||||
"""
|
||||
Instantiate the class defined in the interface file.
|
||||
"""
|
||||
def __init__(self, original: parser.Class, instantiations=(), new_name=''):
|
||||
"""
|
||||
Template <T, U>
|
||||
Instantiations: [T1, U1]
|
||||
"""
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
|
||||
self.template = None
|
||||
self.is_virtual = original.is_virtual
|
||||
self.parent_class = original.parent_class
|
||||
self.parent = original.parent
|
||||
|
||||
# If the class is templated, check if the number of provided instantiations
|
||||
# match the number of templates, else it's only a partial instantiation which is bad.
|
||||
if original.template:
|
||||
assert len(original.template.typenames) == len(
|
||||
instantiations), "Typenames and instantiations mismatch!"
|
||||
|
||||
# Get the instantiated name of the class. E.g. FuncDouble
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
|
||||
# Check for typenames if templated.
|
||||
# By passing in typenames, we can gracefully handle both templated and non-templated classes
|
||||
# This will allow the `This` keyword to be used in both templated and non-templated classes.
|
||||
typenames = self.original.template.typenames if self.original.template else []
|
||||
|
||||
# Instantiate the constructors, static methods, properties, respectively.
|
||||
self.ctors = self.instantiate_ctors(typenames)
|
||||
self.static_methods = self.instantiate_static_methods(typenames)
|
||||
self.properties = self.instantiate_properties(typenames)
|
||||
|
||||
# Instantiate all operator overloads
|
||||
self.operators = self.instantiate_operators(typenames)
|
||||
|
||||
# Set enums
|
||||
self.enums = original.enums
|
||||
|
||||
# Instantiate all instance methods
|
||||
self.methods = self.instantiate_methods(typenames)
|
||||
|
||||
super().__init__(
|
||||
self.template,
|
||||
self.is_virtual,
|
||||
self.name,
|
||||
[self.parent_class],
|
||||
self.ctors,
|
||||
self.methods,
|
||||
self.static_methods,
|
||||
self.properties,
|
||||
self.operators,
|
||||
self.enums,
|
||||
parent=self.parent,
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return "{virtual}Class {cpp_class} : {parent_class}\n"\
|
||||
"{ctors}\n{static_methods}\n{methods}\n{operators}".format(
|
||||
virtual="virtual " if self.is_virtual else '',
|
||||
cpp_class=self.to_cpp(),
|
||||
parent_class=self.parent,
|
||||
ctors="\n".join([repr(ctor) for ctor in self.ctors]),
|
||||
static_methods="\n".join([repr(m)
|
||||
for m in self.static_methods]),
|
||||
methods="\n".join([repr(m) for m in self.methods]),
|
||||
operators="\n".join([repr(op) for op in self.operators])
|
||||
)
|
||||
|
||||
def instantiate_ctors(self, typenames):
|
||||
"""
|
||||
Instantiate the class constructors.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of constructors instantiated with provided template args.
|
||||
"""
|
||||
|
||||
helper = InstantiationHelper(
|
||||
instantiation_type=InstantiatedConstructor)
|
||||
|
||||
instantiated_ctors = helper.multilevel_instantiation(
|
||||
self.original.ctors, typenames, self)
|
||||
|
||||
return instantiated_ctors
|
||||
|
||||
def instantiate_static_methods(self, typenames):
|
||||
"""
|
||||
Instantiate static methods in the class.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of static methods instantiated with provided template args.
|
||||
"""
|
||||
helper = InstantiationHelper(
|
||||
instantiation_type=InstantiatedStaticMethod)
|
||||
|
||||
instantiated_static_methods = helper.multilevel_instantiation(
|
||||
self.original.static_methods, typenames, self)
|
||||
|
||||
return instantiated_static_methods
|
||||
|
||||
def instantiate_methods(self, typenames):
|
||||
"""
|
||||
Instantiate regular methods in the class.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of methods instantiated with provided template args.
|
||||
"""
|
||||
instantiated_methods = []
|
||||
|
||||
helper = InstantiationHelper(instantiation_type=InstantiatedMethod)
|
||||
|
||||
instantiated_methods = helper.multilevel_instantiation(
|
||||
self.original.methods, typenames, self)
|
||||
|
||||
return instantiated_methods
|
||||
|
||||
def instantiate_operators(self, typenames):
|
||||
"""
|
||||
Instantiate the class-level template in the operator overload.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of methods instantiated with provided template args on the class.
|
||||
"""
|
||||
instantiated_operators = []
|
||||
for operator in self.original.operators:
|
||||
instantiated_args = instantiate_args_list(
|
||||
operator.args.list(),
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
instantiated_operators.append(
|
||||
parser.Operator(
|
||||
name=operator.name,
|
||||
operator=operator.operator,
|
||||
return_type=instantiate_return_type(
|
||||
operator.return_type,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
),
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
is_const=operator.is_const,
|
||||
parent=self,
|
||||
))
|
||||
return instantiated_operators
|
||||
|
||||
def instantiate_properties(self, typenames):
|
||||
"""
|
||||
Instantiate the class properties.
|
||||
|
||||
Args:
|
||||
typenames: List of template types to instantiate.
|
||||
|
||||
Return: List of properties instantiated with provided template args.
|
||||
"""
|
||||
instantiated_properties = instantiate_args_list(
|
||||
self.original.properties,
|
||||
typenames,
|
||||
self.instantiations,
|
||||
self.cpp_typename(),
|
||||
)
|
||||
return instantiated_properties
|
||||
|
||||
def cpp_typename(self):
|
||||
"""
|
||||
Return a parser.Typename including namespaces and cpp name of this
|
||||
class.
|
||||
"""
|
||||
if self.original.template:
|
||||
name = "{}<{}>".format(
|
||||
self.original.name,
|
||||
", ".join([inst.to_cpp() for inst in self.instantiations]))
|
||||
else:
|
||||
name = self.original.name
|
||||
namespaces_name = self.namespaces()
|
||||
namespaces_name.append(name)
|
||||
return parser.Typename(namespaces_name)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
return self.cpp_typename().to_cpp()
|
|
@ -0,0 +1,64 @@
|
|||
"""Class constructor instantiator."""
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
|
||||
from typing import Iterable, List
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
|
||||
|
||||
class InstantiatedConstructor(parser.Constructor):
|
||||
"""
|
||||
Instantiate constructor with template parameters.
|
||||
|
||||
E.g.
|
||||
class A {
|
||||
template<X, Y>
|
||||
A(X x, Y y);
|
||||
}
|
||||
"""
|
||||
def __init__(self,
|
||||
original: parser.Constructor,
|
||||
instantiations: Iterable[parser.Typename] = ()):
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.name = original.name
|
||||
self.args = original.args
|
||||
self.template = original.template
|
||||
self.parent = original.parent
|
||||
|
||||
super().__init__(self.name,
|
||||
self.args,
|
||||
self.template,
|
||||
parent=self.parent)
|
||||
|
||||
@classmethod
|
||||
def construct(cls, original: parser.Constructor, typenames: List[str],
|
||||
class_instantiations: List[parser.Typename],
|
||||
method_instantiations: List[parser.Typename],
|
||||
instantiated_args: List[parser.Argument],
|
||||
parent: 'InstantiatedClass'):
|
||||
"""Class method to construct object as required by InstantiationHelper."""
|
||||
method = parser.Constructor(
|
||||
name=parent.name,
|
||||
args=parser.ArgumentList(instantiated_args),
|
||||
template=original.template,
|
||||
parent=parent,
|
||||
)
|
||||
return InstantiatedConstructor(method,
|
||||
instantiations=method_instantiations)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
if self.original.template:
|
||||
# to_cpp will handle all the namespacing and templating
|
||||
instantiation_list = [x.to_cpp() for x in self.instantiations]
|
||||
# now can simply combine the instantiations, separated by commas
|
||||
ret = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiation_list))
|
||||
else:
|
||||
ret = self.original.name
|
||||
return ret
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(super().__repr__())
|
|
@ -0,0 +1,45 @@
|
|||
"""Instantiate a forward declaration."""
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
from gtwrap.template_instantiator.helpers import instantiate_name
|
||||
|
||||
|
||||
class InstantiatedDeclaration(parser.ForwardDeclaration):
|
||||
"""
|
||||
Instantiate typedefs of forward declarations.
|
||||
This is useful when we wish to typedef a templated class
|
||||
which is not defined in the current project.
|
||||
|
||||
E.g.
|
||||
class FactorFromAnotherMother;
|
||||
|
||||
typedef FactorFromAnotherMother<gtsam::Pose3> FactorWeCanUse;
|
||||
"""
|
||||
def __init__(self, original, instantiations=(), new_name=''):
|
||||
super().__init__(original.typename,
|
||||
original.parent_type,
|
||||
original.is_virtual,
|
||||
parent=original.parent)
|
||||
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.parent = original.parent
|
||||
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
instantiated_names = [
|
||||
inst.qualified_name() for inst in self.instantiations
|
||||
]
|
||||
name = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiated_names))
|
||||
namespaces_name = self.namespaces()
|
||||
namespaces_name.append(name)
|
||||
# Leverage Typename to generate the fully qualified C++ name
|
||||
return parser.Typename(namespaces_name).to_cpp()
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(
|
||||
super(InstantiatedDeclaration, self).__repr__())
|
|
@ -0,0 +1,68 @@
|
|||
"""Instantiate global function."""
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
from gtwrap.template_instantiator.helpers import (instantiate_args_list,
|
||||
instantiate_name,
|
||||
instantiate_return_type)
|
||||
|
||||
|
||||
class InstantiatedGlobalFunction(parser.GlobalFunction):
|
||||
"""
|
||||
Instantiate global functions.
|
||||
|
||||
E.g.
|
||||
template<T = {double}>
|
||||
T add(const T& x, const T& y);
|
||||
"""
|
||||
def __init__(self, original, instantiations=(), new_name=''):
|
||||
self.original = original
|
||||
self.instantiations = instantiations
|
||||
self.template = ''
|
||||
self.parent = original.parent
|
||||
|
||||
if not original.template:
|
||||
self.name = original.name
|
||||
self.return_type = original.return_type
|
||||
self.args = original.args
|
||||
else:
|
||||
self.name = instantiate_name(
|
||||
original.name, instantiations) if not new_name else new_name
|
||||
self.return_type = instantiate_return_type(
|
||||
original.return_type,
|
||||
self.original.template.typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
instantiated_args = instantiate_args_list(
|
||||
original.args.list(),
|
||||
self.original.template.typenames,
|
||||
self.instantiations,
|
||||
# Keyword type name `This` should already be replaced in the
|
||||
# previous class template instantiation round.
|
||||
cpp_typename='',
|
||||
)
|
||||
self.args = parser.ArgumentList(instantiated_args)
|
||||
|
||||
super().__init__(self.name,
|
||||
self.return_type,
|
||||
self.args,
|
||||
self.template,
|
||||
parent=self.parent)
|
||||
|
||||
def to_cpp(self):
|
||||
"""Generate the C++ code for wrapping."""
|
||||
if self.original.template:
|
||||
instantiated_names = [
|
||||
inst.instantiated_name() for inst in self.instantiations
|
||||
]
|
||||
ret = "{}<{}>".format(self.original.name,
|
||||
",".join(instantiated_names))
|
||||
else:
|
||||
ret = self.original.name
|
||||
return ret
|
||||
|
||||
def __repr__(self):
|
||||
return "Instantiated {}".format(
|
||||
super(InstantiatedGlobalFunction, self).__repr__())
|
|
@ -0,0 +1,293 @@
|
|||
"""Various helpers for instantiation."""
|
||||
|
||||
import itertools
|
||||
from copy import deepcopy
|
||||
from typing import List, Sequence, Union
|
||||
|
||||
import gtwrap.interface_parser as parser
|
||||
|
||||
ClassMembers = Union[parser.Constructor, parser.Method, parser.StaticMethod,
|
||||
parser.GlobalFunction, parser.Operator, parser.Variable,
|
||||
parser.Enum]
|
||||
InstantiatedMembers = Union['InstantiatedConstructor', 'InstantiatedMethod',
|
||||
'InstantiatedStaticMethod',
|
||||
'InstantiatedGlobalFunction']
|
||||
|
||||
|
||||
def is_scoped_template(template_typenames: Sequence[str],
|
||||
str_arg_typename: str):
|
||||
"""
|
||||
Check if the template given by `str_arg_typename` is a scoped template e.g. T::Value,
|
||||
and if so, return what template from `template_typenames` and
|
||||
the corresponding index matches the scoped template correctly.
|
||||
"""
|
||||
for idx, template in enumerate(template_typenames):
|
||||
if "::" in str_arg_typename and \
|
||||
template in str_arg_typename.split("::"):
|
||||
return template, idx
|
||||
return False, -1
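A small usage sketch of the helper above (typenames purely illustrative):

# "T::Value" is scoped on the class template parameter "T",
# so the helper returns the matching template and its index.
assert is_scoped_template(["T", "U"], "T::Value") == ("T", 0)
# A plain typename with no "::" is not a scoped template.
assert is_scoped_template(["T", "U"], "double") == (False, -1)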
|
||||
|
||||
|
||||
def instantiate_type(
|
||||
ctype: parser.Type,
|
||||
template_typenames: Sequence[str],
|
||||
instantiations: Sequence[parser.Typename],
|
||||
cpp_typename: parser.Typename,
|
||||
instantiated_class: 'InstantiatedClass' = None) -> parser.Type:
|
||||
"""
|
||||
Instantiate template typename for `ctype`.
|
||||
|
||||
Args:
|
||||
ctype: The original argument type.
|
||||
template_typenames: List of strings representing the templates.
|
||||
instantiations: List of the instantiations of the templates in `template_typenames`.
|
||||
cpp_typename: Full-namespace cpp class name of this instantiation
|
||||
to replace for arguments of type named `This`.
|
||||
instantiated_class: The instantiated class which, if provided,
|
||||
will be used for instantiating `This`.
|
||||
|
||||
Returns:
|
||||
If `ctype`'s name is in the `template_typenames`, return the
|
||||
corresponding type to replace in `instantiations`.
|
||||
If ctype name is `This`, return the new typename `cpp_typename`.
|
||||
Otherwise, return the original ctype.
|
||||
"""
|
||||
# make a deep copy so that there is no overwriting of original template params
|
||||
ctype = deepcopy(ctype)
|
||||
|
||||
# Check if the return type has template parameters
|
||||
if ctype.typename.instantiations:
|
||||
for idx, instantiation in enumerate(ctype.typename.instantiations):
|
||||
if instantiation.name in template_typenames:
|
||||
template_idx = template_typenames.index(instantiation.name)
|
||||
ctype.typename.instantiations[
|
||||
idx] = instantiations[ # type: ignore
|
||||
template_idx]
|
||||
|
||||
return ctype
|
||||
|
||||
str_arg_typename = str(ctype.typename)
|
||||
|
||||
# Check if template is a scoped template e.g. T::Value where T is the template
|
||||
scoped_template, scoped_idx = is_scoped_template(template_typenames,
|
||||
str_arg_typename)
|
||||
|
||||
# Instantiate templates which have enumerated instantiations in the template.
|
||||
# E.g. `template<T={double}>`.
|
||||
|
||||
# Instantiate scoped templates, e.g. T::Value.
|
||||
if scoped_template:
|
||||
# Create a copy of the instantiation so we can modify it.
|
||||
instantiation = deepcopy(instantiations[scoped_idx])
|
||||
# Replace the part of the template with the instantiation
|
||||
instantiation.name = str_arg_typename.replace(scoped_template,
|
||||
instantiation.name)
|
||||
return parser.Type(
|
||||
typename=instantiation,
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
# Check for exact template match.
|
||||
elif str_arg_typename in template_typenames:
|
||||
idx = template_typenames.index(str_arg_typename)
|
||||
return parser.Type(
|
||||
typename=instantiations[idx],
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
|
||||
# If a method has the keyword `This`, we replace it with the (instantiated) class.
|
||||
elif str_arg_typename == 'This':
|
||||
# Check if the class is template instantiated
|
||||
# so we can replace it with the instantiated version.
|
||||
if instantiated_class:
|
||||
name = instantiated_class.original.name
|
||||
namespaces_name = instantiated_class.namespaces()
|
||||
namespaces_name.append(name)
|
||||
cpp_typename = parser.Typename(
|
||||
namespaces_name,
|
||||
instantiations=instantiated_class.instantiations)
|
||||
|
||||
return parser.Type(
|
||||
typename=cpp_typename,
|
||||
is_const=ctype.is_const,
|
||||
is_shared_ptr=ctype.is_shared_ptr,
|
||||
is_ptr=ctype.is_ptr,
|
||||
is_ref=ctype.is_ref,
|
||||
is_basic=ctype.is_basic,
|
||||
)
|
||||
|
||||
# Case when 'This' is present in the type namespace, e.g `This::Subclass`.
|
||||
elif 'This' in str_arg_typename:
|
||||
# Simply get the index of `This` in the namespace and replace it with the instantiated name.
|
||||
namespace_idx = ctype.typename.namespaces.index('This')
|
||||
ctype.typename.namespaces[namespace_idx] = cpp_typename.name
|
||||
return ctype
|
||||
|
||||
else:
|
||||
return ctype
|
||||
|
||||
|
||||
def instantiate_args_list(
|
||||
args_list: Sequence[parser.Argument],
|
||||
template_typenames: Sequence[parser.template.Typename],
|
||||
instantiations: Sequence, cpp_typename: parser.Typename):
|
||||
"""
|
||||
Instantiate template typenames in an argument list.
|
||||
Type with name `This` will be replaced by @p `cpp_typename`.
|
||||
|
||||
@param[in] args_list A list of `parser.Argument` to instantiate.
|
||||
@param[in] template_typenames List of template typenames to instantiate,
|
||||
e.g. ['T', 'U', 'V'].
|
||||
@param[in] instantiations List of specific types to instantiate, each
|
||||
associated with each template typename. Each type is a parser.Typename,
|
||||
including its name and full namespaces.
|
||||
@param[in] cpp_typename Full-namespace cpp class name of this instantiation
|
||||
to replace for arguments of type named `This`.
|
||||
@return A new list of parser.Argument which types are replaced with their
|
||||
instantiations.
|
||||
"""
|
||||
instantiated_args = []
|
||||
for arg in args_list:
|
||||
new_type = instantiate_type(arg.ctype, template_typenames,
|
||||
instantiations, cpp_typename)
|
||||
instantiated_args.append(
|
||||
parser.Argument(name=arg.name, ctype=new_type,
|
||||
default=arg.default))
|
||||
return instantiated_args
|
||||
|
||||
|
||||
def instantiate_return_type(
|
||||
return_type: parser.ReturnType,
|
||||
template_typenames: Sequence[parser.template.Typename],
|
||||
instantiations: Sequence[parser.Typename],
|
||||
cpp_typename: parser.Typename,
|
||||
instantiated_class: 'InstantiatedClass' = None):
|
||||
"""Instantiate the return type."""
|
||||
new_type1 = instantiate_type(return_type.type1,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename,
|
||||
instantiated_class=instantiated_class)
|
||||
if return_type.type2:
|
||||
new_type2 = instantiate_type(return_type.type2,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename,
|
||||
instantiated_class=instantiated_class)
|
||||
else:
|
||||
new_type2 = ''
|
||||
return parser.ReturnType(new_type1, new_type2)
|
||||
|
||||
|
||||
def instantiate_name(original_name: str,
|
||||
instantiations: Sequence[parser.Typename]):
|
||||
"""
|
||||
Concatenate instantiated types with `original_name` to form a new
|
||||
instantiated name.
|
||||
|
||||
NOTE: To avoid conflicts, we should include the instantiation's
|
||||
namespaces, but that is too verbose.
|
||||
"""
|
||||
instantiated_names = []
|
||||
for inst in instantiations:
|
||||
# Ensure the first character of the type is capitalized
|
||||
name = inst.instantiated_name()
|
||||
# Using `capitalize` on the complete name causes other caps to be lower case
|
||||
instantiated_names.append(name.replace(name[0], name[0].capitalize()))
|
||||
|
||||
return "{}{}".format(original_name, "".join(instantiated_names))
|
||||
|
||||
|
||||

class InstantiationHelper:
    """
    Helper class for instantiating templates.

    Requires that `instantiation_type` defines a class method called
    `construct` to generate the appropriate object type.

    Signature for `construct` should be
    ```
    construct(method,
              typenames,
              class_instantiations,
              method_instantiations,
              instantiated_args,
              parent=parent)
    ```
    """
    def __init__(self, instantiation_type: InstantiatedMembers):
        self.instantiation_type = instantiation_type

    def instantiate(self, instantiated_methods: List[InstantiatedMembers],
                    method: ClassMembers, typenames: Sequence[str],
                    class_instantiations: Sequence[parser.Typename],
                    method_instantiations: Sequence[parser.Typename],
                    parent: 'InstantiatedClass'):
        """
        Instantiate both the class-level and method-level templates.
        """
        instantiations = class_instantiations + method_instantiations

        instantiated_args = instantiate_args_list(method.args.list(),
                                                  typenames, instantiations,
                                                  parent.cpp_typename())

        instantiated_methods.append(
            self.instantiation_type.construct(method,
                                              typenames,
                                              class_instantiations,
                                              method_instantiations,
                                              instantiated_args,
                                              parent=parent))

        return instantiated_methods

    def multilevel_instantiation(self, methods_list: Sequence[ClassMembers],
                                 typenames: Sequence[str],
                                 parent: 'InstantiatedClass'):
        """
        Helper to instantiate methods at both the class and method level.

        Args:
            methods_list: The list of methods in the class to be instantiated.
            typenames: List of class-level template parameters, e.g. ['T'].
            parent: The instantiated class to which `methods_list` belongs.
        """
        instantiated_methods = []

        for method in methods_list:
            # We create a copy since we will modify the typenames list.
            method_typenames = deepcopy(typenames)

            if isinstance(method.template, parser.template.Template):
                method_typenames.extend(method.template.typenames)

                # Get all combinations of template args
                for instantiations in itertools.product(
                        *method.template.instantiations):

                    instantiated_methods = self.instantiate(
                        instantiated_methods,
                        method,
                        typenames=method_typenames,
                        class_instantiations=parent.instantiations,
                        method_instantiations=list(instantiations),
                        parent=parent)

            else:
                # If there are no method-level templates, just use the class templates
                instantiated_methods = self.instantiate(
                    instantiated_methods,
                    method,
                    typenames=method_typenames,
                    class_instantiations=parent.instantiations,
                    method_instantiations=[],
                    parent=parent)

        return instantiated_methods

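As a rough orientation (again, not part of the patch): a class-level wrapper pass might drive the helper as in the hedged sketch below. `parent_class` is a hypothetical, already-built InstantiatedClass, and `InstantiatedMethod` is the member type defined in the new module that follows.

```python
# Hedged sketch, assuming `parent_class` exposes .methods, .instantiations and
# .cpp_typename() as the helper above requires.
helper = InstantiationHelper(instantiation_type=InstantiatedMethod)
class_level_typenames = ['T']  # hypothetical class template parameter names
instantiated = helper.multilevel_instantiation(
    methods_list=parent_class.methods,
    typenames=class_level_typenames,
    parent=parent_class)
```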
@ -0,0 +1,124 @@
"""Class method and static method instantiators."""

from typing import Iterable

import gtwrap.interface_parser as parser
from gtwrap.template_instantiator.helpers import (instantiate_name,
                                                  instantiate_return_type)


class InstantiatedMethod(parser.Method):
    """
    Instantiate method with template parameters.

    E.g.
    class A {
        template<X, Y>
        void func(X x, Y y);
    }
    """
    def __init__(self,
                 original: parser.Method,
                 instantiations: Iterable[parser.Typename] = ()):
        self.original = original
        self.instantiations = instantiations
        self.template = original.template
        self.is_const = original.is_const
        self.parent = original.parent

        self.name = instantiate_name(original.name, self.instantiations)
        self.return_type = original.return_type
        self.args = original.args

        super().__init__(self.template,
                         self.name,
                         self.return_type,
                         self.args,
                         self.is_const,
                         parent=self.parent)

    @classmethod
    def construct(cls, original, typenames, class_instantiations,
                  method_instantiations, instantiated_args, parent):
        """Class method to construct object as required by InstantiationHelper."""
        method = parser.Method(
            template=original.template,
            name=original.name,
            return_type=instantiate_return_type(
                original.return_type, typenames,
                class_instantiations + method_instantiations,
                parent.cpp_typename()),
            args=parser.ArgumentList(instantiated_args),
            is_const=original.is_const,
            parent=parent,
        )
        return InstantiatedMethod(method, instantiations=method_instantiations)

    def to_cpp(self):
        """Generate the C++ code for wrapping."""
        if self.original.template:
            # to_cpp will handle all the namespacing and templating
            instantiation_list = [x.to_cpp() for x in self.instantiations]
            # now can simply combine the instantiations, separated by commas
            ret = "{}<{}>".format(self.original.name,
                                  ",".join(instantiation_list))
        else:
            ret = self.original.name
        return ret

    def __repr__(self):
        return "Instantiated {}".format(super().__repr__())


class InstantiatedStaticMethod(parser.StaticMethod):
    """
    Instantiate static method with template parameters.
    """
    def __init__(self,
                 original: parser.StaticMethod,
                 instantiations: Iterable[parser.Typename] = ()):
        self.original = original
        self.instantiations = instantiations

        self.name = instantiate_name(original.name, self.instantiations)
        self.return_type = original.return_type
        self.args = original.args
        self.template = original.template
        self.parent = original.parent

        super().__init__(self.name, self.return_type, self.args, self.template,
                         self.parent)

    @classmethod
    def construct(cls, original, typenames, class_instantiations,
                  method_instantiations, instantiated_args, parent):
        """Class method to construct object as required by InstantiationHelper."""
        method = parser.StaticMethod(
            name=original.name,
            return_type=instantiate_return_type(original.return_type,
                                                typenames,
                                                class_instantiations +
                                                method_instantiations,
                                                parent.cpp_typename(),
                                                instantiated_class=parent),
            args=parser.ArgumentList(instantiated_args),
            template=original.template,
            parent=parent,
        )
        return InstantiatedStaticMethod(method,
                                        instantiations=method_instantiations)

    def to_cpp(self):
        """Generate the C++ code for wrapping."""
        if self.original.template:
            # to_cpp will handle all the namespacing and templating
            instantiation_list = [x.to_cpp() for x in self.instantiations]
            # now can simply combine the instantiations, separated by commas
            ret = "{}<{}>".format(self.original.name,
                                  ",".join(instantiation_list))
        else:
            ret = self.original.name
        return ret

    def __repr__(self):
        return "Instantiated {}".format(super().__repr__())

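Illustrative only: the string format used by to_cpp() above, applied to a hypothetical method `templatedMethod` and two template arguments whose to_cpp() forms are the strings shown.

```python
instantiation_list = ["gtsam::Point2", "double"]
print("{}<{}>".format("templatedMethod", ",".join(instantiation_list)))
# -> templatedMethod<gtsam::Point2,double>
```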
@ -0,0 +1,88 @@
"""Instantiate a namespace."""

import itertools

import gtwrap.interface_parser as parser
from gtwrap.template_instantiator.classes import InstantiatedClass
from gtwrap.template_instantiator.declaration import InstantiatedDeclaration
from gtwrap.template_instantiator.function import InstantiatedGlobalFunction


def instantiate_namespace(namespace):
    """
    Instantiate the classes and other elements in the `namespace` content and
    assign it back to the namespace content attribute.

    @param[in/out] namespace The namespace whose content will be replaced with
        the instantiated content.
    """
    instantiated_content = []
    typedef_content = []

    for element in namespace.content:
        if isinstance(element, parser.Class):
            original_class = element
            if not original_class.template:
                instantiated_content.append(
                    InstantiatedClass(original_class, []))
            else:
                # This case is for when the templates have enumerated instantiations.

                # Use itertools to get all possible combinations of instantiations
                # Works even if one template does not have an instantiation list
                for instantiations in itertools.product(
                        *original_class.template.instantiations):
                    instantiated_content.append(
                        InstantiatedClass(original_class,
                                          list(instantiations)))

        elif isinstance(element, parser.GlobalFunction):
            original_func = element
            if not original_func.template:
                instantiated_content.append(
                    InstantiatedGlobalFunction(original_func, []))
            else:
                # Use itertools to get all possible combinations of instantiations
                # Works even if one template does not have an instantiation list
                for instantiations in itertools.product(
                        *original_func.template.instantiations):
                    instantiated_content.append(
                        InstantiatedGlobalFunction(original_func,
                                                   list(instantiations)))

        elif isinstance(element, parser.TypedefTemplateInstantiation):
            # This is for the case where `typedef` statements are used
            # to specify the template parameters.
            typedef_inst = element
            top_level = namespace.top_level()
            original_element = top_level.find_class_or_function(
                typedef_inst.typename)

            # Check if element is a typedef'd class, function or
            # forward declaration from another project.
            if isinstance(original_element, parser.Class):
                typedef_content.append(
                    InstantiatedClass(original_element,
                                      typedef_inst.typename.instantiations,
                                      typedef_inst.new_name))
            elif isinstance(original_element, parser.GlobalFunction):
                typedef_content.append(
                    InstantiatedGlobalFunction(
                        original_element, typedef_inst.typename.instantiations,
                        typedef_inst.new_name))
            elif isinstance(original_element, parser.ForwardDeclaration):
                typedef_content.append(
                    InstantiatedDeclaration(
                        original_element, typedef_inst.typename.instantiations,
                        typedef_inst.new_name))

        elif isinstance(element, parser.Namespace):
            element = instantiate_namespace(element)
            instantiated_content.append(element)
        else:
            instantiated_content.append(element)

    instantiated_content.extend(typedef_content)
    namespace.content = instantiated_content

    return namespace
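A minimal usage sketch (not part of the patch), assuming `module` is the top-level parser.Namespace produced by the interface parser for a wrapped .i file. Instantiation mutates the namespace content in place, recurses into nested namespaces, and returns the same object; typedef-based instantiations are appended after the rest of the content.

```python
module = instantiate_namespace(module)
for element in module.content:
    print(type(element).__name__)
```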
@ -1,2 +1,2 @@
pyparsing
pytest
pyparsing==2.4.7
pytest>=6.2.4

@ -7,6 +7,7 @@
%
%-------Static Methods-------
%staticMethodWithThis() : returns Fun<double>
%templatedStaticMethodInt(int m) : returns double
%
%-------Serialization Interface-------
%string_serialize() : returns string

@ -69,5 +70,16 @@ classdef FunDouble < handle
      error('Arguments do not match any overload of function FunDouble.staticMethodWithThis');
    end

    function varargout = TemplatedStaticMethodInt(varargin)
      % TEMPLATEDSTATICMETHODINT usage: templatedStaticMethodInt(int m) : returns double
      % Doxygen can be found at https://gtsam.org/doxygen/
      if length(varargin) == 1 && isa(varargin{1},'numeric')
        varargout{1} = class_wrapper(10, varargin{:});
        return
      end

      error('Arguments do not match any overload of function FunDouble.templatedStaticMethodInt');
    end

  end
end

@ -9,7 +9,7 @@ classdef MultipleTemplatesIntDouble < handle
    function obj = MultipleTemplatesIntDouble(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(49, my_ptr);
        class_wrapper(50, my_ptr);
      else
        error('Arguments do not match any overload of MultipleTemplatesIntDouble constructor');
      end

@ -17,7 +17,7 @@ classdef MultipleTemplatesIntDouble < handle
    end

    function delete(obj)
      class_wrapper(50, obj.ptr_MultipleTemplatesIntDouble);
      class_wrapper(51, obj.ptr_MultipleTemplatesIntDouble);
    end

    function display(obj), obj.print(''); end

@ -9,7 +9,7 @@ classdef MultipleTemplatesIntFloat < handle
    function obj = MultipleTemplatesIntFloat(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(51, my_ptr);
        class_wrapper(52, my_ptr);
      else
        error('Arguments do not match any overload of MultipleTemplatesIntFloat constructor');
      end

@ -17,7 +17,7 @@ classdef MultipleTemplatesIntFloat < handle
    end

    function delete(obj)
      class_wrapper(52, obj.ptr_MultipleTemplatesIntFloat);
      class_wrapper(53, obj.ptr_MultipleTemplatesIntFloat);
    end

    function display(obj), obj.print(''); end

@ -15,9 +15,9 @@ classdef MyFactorPosePoint2 < handle
    function obj = MyFactorPosePoint2(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(62, my_ptr);
        class_wrapper(63, my_ptr);
      elseif nargin == 4 && isa(varargin{1},'numeric') && isa(varargin{2},'numeric') && isa(varargin{3},'double') && isa(varargin{4},'gtsam.noiseModel.Base')
        my_ptr = class_wrapper(63, varargin{1}, varargin{2}, varargin{3}, varargin{4});
        my_ptr = class_wrapper(64, varargin{1}, varargin{2}, varargin{3}, varargin{4});
      else
        error('Arguments do not match any overload of MyFactorPosePoint2 constructor');
      end

@ -25,7 +25,7 @@ classdef MyFactorPosePoint2 < handle
    end

    function delete(obj)
      class_wrapper(64, obj.ptr_MyFactorPosePoint2);
      class_wrapper(65, obj.ptr_MyFactorPosePoint2);
    end

    function display(obj), obj.print(''); end

@ -36,7 +36,7 @@ classdef MyFactorPosePoint2 < handle
      % PRINT usage: print(string s, KeyFormatter keyFormatter) : returns void
      % Doxygen can be found at https://gtsam.org/doxygen/
      if length(varargin) == 2 && isa(varargin{1},'char') && isa(varargin{2},'gtsam.KeyFormatter')
        class_wrapper(65, this, varargin{:});
        class_wrapper(66, this, varargin{:});
        return
      end
      error('Arguments do not match any overload of function MyFactorPosePoint2.print');

@ -12,9 +12,9 @@ classdef MyVector12 < handle
    function obj = MyVector12(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(46, my_ptr);
        class_wrapper(47, my_ptr);
      elseif nargin == 0
        my_ptr = class_wrapper(47);
        my_ptr = class_wrapper(48);
      else
        error('Arguments do not match any overload of MyVector12 constructor');
      end

@ -22,7 +22,7 @@ classdef MyVector12 < handle
    end

    function delete(obj)
      class_wrapper(48, obj.ptr_MyVector12);
      class_wrapper(49, obj.ptr_MyVector12);
    end

    function display(obj), obj.print(''); end

@ -12,9 +12,9 @@ classdef MyVector3 < handle
    function obj = MyVector3(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(43, my_ptr);
        class_wrapper(44, my_ptr);
      elseif nargin == 0
        my_ptr = class_wrapper(44);
        my_ptr = class_wrapper(45);
      else
        error('Arguments do not match any overload of MyVector3 constructor');
      end

@ -22,7 +22,7 @@ classdef MyVector3 < handle
    end

    function delete(obj)
      class_wrapper(45, obj.ptr_MyVector3);
      class_wrapper(46, obj.ptr_MyVector3);
    end

    function display(obj), obj.print(''); end

@ -19,9 +19,9 @@ classdef PrimitiveRefDouble < handle
    function obj = PrimitiveRefDouble(varargin)
      if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
        my_ptr = varargin{2};
        class_wrapper(39, my_ptr);
        class_wrapper(40, my_ptr);
      elseif nargin == 0
        my_ptr = class_wrapper(40);
        my_ptr = class_wrapper(41);
      else
        error('Arguments do not match any overload of PrimitiveRefDouble constructor');
      end

@ -29,7 +29,7 @@ classdef PrimitiveRefDouble < handle
    end

    function delete(obj)
      class_wrapper(41, obj.ptr_PrimitiveRefDouble);
      class_wrapper(42, obj.ptr_PrimitiveRefDouble);
    end

    function display(obj), obj.print(''); end

@ -43,7 +43,7 @@ classdef PrimitiveRefDouble < handle
      % BRUTAL usage: Brutal(double t) : returns PrimitiveRefdouble
      % Doxygen can be found at https://gtsam.org/doxygen/
      if length(varargin) == 1 && isa(varargin{1},'double')
        varargout{1} = class_wrapper(42, varargin{:});
        varargout{1} = class_wrapper(43, varargin{:});
        return
      end

@ -40,11 +40,11 @@ classdef Test < handle
|
|||
function obj = Test(varargin)
|
||||
if nargin == 2 && isa(varargin{1}, 'uint64') && varargin{1} == uint64(5139824614673773682)
|
||||
my_ptr = varargin{2};
|
||||
class_wrapper(10, my_ptr);
|
||||
class_wrapper(11, my_ptr);
|
||||
elseif nargin == 0
|
||||
my_ptr = class_wrapper(11);
|
||||
my_ptr = class_wrapper(12);
|
||||
elseif nargin == 2 && isa(varargin{1},'double') && isa(varargin{2},'double')
|
||||
my_ptr = class_wrapper(12, varargin{1}, varargin{2});
|
||||
my_ptr = class_wrapper(13, varargin{1}, varargin{2});
|
||||
else
|
||||
error('Arguments do not match any overload of Test constructor');
|
||||
end
|
||||
|
@ -52,7 +52,7 @@ classdef Test < handle
|
|||
end
|
||||
|
||||
function delete(obj)
|
||||
class_wrapper(13, obj.ptr_Test);
|
||||
class_wrapper(14, obj.ptr_Test);
|
||||
end
|
||||
|
||||
function display(obj), obj.print(''); end
|
||||
|
@ -63,7 +63,7 @@ classdef Test < handle
|
|||
% ARG_EIGENCONSTREF usage: arg_EigenConstRef(Matrix value) : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double')
|
||||
class_wrapper(14, this, varargin{:});
|
||||
class_wrapper(15, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.arg_EigenConstRef');
|
||||
|
@ -73,7 +73,7 @@ classdef Test < handle
|
|||
% CREATE_MIXEDPTRS usage: create_MixedPtrs() : returns pair< Test, Test >
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 0
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(15, this, varargin{:});
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(16, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.create_MixedPtrs');
|
||||
|
@ -83,7 +83,7 @@ classdef Test < handle
|
|||
% CREATE_PTRS usage: create_ptrs() : returns pair< Test, Test >
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 0
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(16, this, varargin{:});
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(17, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.create_ptrs');
|
||||
|
@ -93,7 +93,7 @@ classdef Test < handle
|
|||
% GET_CONTAINER usage: get_container() : returns std.vectorTest
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 0
|
||||
varargout{1} = class_wrapper(17, this, varargin{:});
|
||||
varargout{1} = class_wrapper(18, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.get_container');
|
||||
|
@ -103,7 +103,7 @@ classdef Test < handle
|
|||
% LAMBDA usage: lambda() : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 0
|
||||
class_wrapper(18, this, varargin{:});
|
||||
class_wrapper(19, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.lambda');
|
||||
|
@ -113,7 +113,7 @@ classdef Test < handle
|
|||
% PRINT usage: print() : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 0
|
||||
class_wrapper(19, this, varargin{:});
|
||||
class_wrapper(20, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.print');
|
||||
|
@ -123,7 +123,7 @@ classdef Test < handle
|
|||
% RETURN_POINT2PTR usage: return_Point2Ptr(bool value) : returns Point2
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'logical')
|
||||
varargout{1} = class_wrapper(20, this, varargin{:});
|
||||
varargout{1} = class_wrapper(21, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_Point2Ptr');
|
||||
|
@ -133,7 +133,7 @@ classdef Test < handle
|
|||
% RETURN_TEST usage: return_Test(Test value) : returns Test
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'Test')
|
||||
varargout{1} = class_wrapper(21, this, varargin{:});
|
||||
varargout{1} = class_wrapper(22, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_Test');
|
||||
|
@ -143,7 +143,7 @@ classdef Test < handle
|
|||
% RETURN_TESTPTR usage: return_TestPtr(Test value) : returns Test
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'Test')
|
||||
varargout{1} = class_wrapper(22, this, varargin{:});
|
||||
varargout{1} = class_wrapper(23, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_TestPtr');
|
||||
|
@ -153,7 +153,7 @@ classdef Test < handle
|
|||
% RETURN_BOOL usage: return_bool(bool value) : returns bool
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'logical')
|
||||
varargout{1} = class_wrapper(23, this, varargin{:});
|
||||
varargout{1} = class_wrapper(24, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_bool');
|
||||
|
@ -163,7 +163,7 @@ classdef Test < handle
|
|||
% RETURN_DOUBLE usage: return_double(double value) : returns double
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double')
|
||||
varargout{1} = class_wrapper(24, this, varargin{:});
|
||||
varargout{1} = class_wrapper(25, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_double');
|
||||
|
@ -173,7 +173,7 @@ classdef Test < handle
|
|||
% RETURN_FIELD usage: return_field(Test t) : returns bool
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'Test')
|
||||
varargout{1} = class_wrapper(25, this, varargin{:});
|
||||
varargout{1} = class_wrapper(26, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_field');
|
||||
|
@ -183,7 +183,7 @@ classdef Test < handle
|
|||
% RETURN_INT usage: return_int(int value) : returns int
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'numeric')
|
||||
varargout{1} = class_wrapper(26, this, varargin{:});
|
||||
varargout{1} = class_wrapper(27, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_int');
|
||||
|
@ -193,7 +193,7 @@ classdef Test < handle
|
|||
% RETURN_MATRIX1 usage: return_matrix1(Matrix value) : returns Matrix
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double')
|
||||
varargout{1} = class_wrapper(27, this, varargin{:});
|
||||
varargout{1} = class_wrapper(28, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_matrix1');
|
||||
|
@ -203,7 +203,7 @@ classdef Test < handle
|
|||
% RETURN_MATRIX2 usage: return_matrix2(Matrix value) : returns Matrix
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double')
|
||||
varargout{1} = class_wrapper(28, this, varargin{:});
|
||||
varargout{1} = class_wrapper(29, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_matrix2');
|
||||
|
@ -213,13 +213,13 @@ classdef Test < handle
|
|||
% RETURN_PAIR usage: return_pair(Vector v, Matrix A) : returns pair< Vector, Matrix >
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 2 && isa(varargin{1},'double') && size(varargin{1},2)==1 && isa(varargin{2},'double')
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(29, this, varargin{:});
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(30, this, varargin{:});
|
||||
return
|
||||
end
|
||||
% RETURN_PAIR usage: return_pair(Vector v) : returns pair< Vector, Matrix >
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(30, this, varargin{:});
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(31, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_pair');
|
||||
|
@ -229,7 +229,7 @@ classdef Test < handle
|
|||
% RETURN_PTRS usage: return_ptrs(Test p1, Test p2) : returns pair< Test, Test >
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 2 && isa(varargin{1},'Test') && isa(varargin{2},'Test')
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(31, this, varargin{:});
|
||||
[ varargout{1} varargout{2} ] = class_wrapper(32, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_ptrs');
|
||||
|
@ -239,7 +239,7 @@ classdef Test < handle
|
|||
% RETURN_SIZE_T usage: return_size_t(size_t value) : returns size_t
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'numeric')
|
||||
varargout{1} = class_wrapper(32, this, varargin{:});
|
||||
varargout{1} = class_wrapper(33, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_size_t');
|
||||
|
@ -249,7 +249,7 @@ classdef Test < handle
|
|||
% RETURN_STRING usage: return_string(string value) : returns string
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'char')
|
||||
varargout{1} = class_wrapper(33, this, varargin{:});
|
||||
varargout{1} = class_wrapper(34, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_string');
|
||||
|
@ -259,7 +259,7 @@ classdef Test < handle
|
|||
% RETURN_VECTOR1 usage: return_vector1(Vector value) : returns Vector
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1
|
||||
varargout{1} = class_wrapper(34, this, varargin{:});
|
||||
varargout{1} = class_wrapper(35, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_vector1');
|
||||
|
@ -269,19 +269,13 @@ classdef Test < handle
|
|||
% RETURN_VECTOR2 usage: return_vector2(Vector value) : returns Vector
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'double') && size(varargin{1},2)==1
|
||||
varargout{1} = class_wrapper(35, this, varargin{:});
|
||||
varargout{1} = class_wrapper(36, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.return_vector2');
|
||||
end
|
||||
|
||||
function varargout = set_container(this, varargin)
|
||||
% SET_CONTAINER usage: set_container(vector<Test> container) : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'std.vectorTest')
|
||||
class_wrapper(36, this, varargin{:});
|
||||
return
|
||||
end
|
||||
% SET_CONTAINER usage: set_container(vector<Test> container) : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'std.vectorTest')
|
||||
|
@ -294,6 +288,12 @@ classdef Test < handle
|
|||
class_wrapper(38, this, varargin{:});
|
||||
return
|
||||
end
|
||||
% SET_CONTAINER usage: set_container(vector<Test> container) : returns void
|
||||
% Doxygen can be found at https://gtsam.org/doxygen/
|
||||
if length(varargin) == 1 && isa(varargin{1},'std.vectorTest')
|
||||
class_wrapper(39, this, varargin{:});
|
||||
return
|
||||
end
|
||||
error('Arguments do not match any overload of function Test.set_container');
|
||||
end
|
||||
|
||||
|
|
|
@ -246,7 +246,14 @@ void FunDouble_staticMethodWithThis_9(int nargout, mxArray *out[], int nargin, c
|
|||
out[0] = wrap_shared_ptr(boost::make_shared<Fun<double>>(Fun<double>::staticMethodWithThis()),"Fundouble", false);
|
||||
}
|
||||
|
||||
void Test_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void FunDouble_templatedStaticMethodInt_10(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("FunDouble.templatedStaticMethodInt",nargout,nargin,1);
|
||||
int m = unwrap< int >(in[0]);
|
||||
out[0] = wrap< double >(Fun<double>::templatedStaticMethodInt(m));
|
||||
}
|
||||
|
||||
void Test_collectorInsertAndMakeBase_11(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<Test> Shared;
|
||||
|
@ -255,7 +262,7 @@ void Test_collectorInsertAndMakeBase_10(int nargout, mxArray *out[], int nargin,
|
|||
collector_Test.insert(self);
|
||||
}
|
||||
|
||||
void Test_constructor_11(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<Test> Shared;
|
||||
|
@ -266,7 +273,7 @@ void Test_constructor_11(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_constructor_13(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<Test> Shared;
|
||||
|
@ -279,7 +286,7 @@ void Test_constructor_12(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void Test_deconstructor_13(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_deconstructor_14(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<Test> Shared;
|
||||
checkArguments("delete_Test",nargout,nargin,1);
|
||||
|
@ -292,7 +299,7 @@ void Test_deconstructor_13(int nargout, mxArray *out[], int nargin, const mxArra
|
|||
}
|
||||
}
|
||||
|
||||
void Test_arg_EigenConstRef_14(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_arg_EigenConstRef_15(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("arg_EigenConstRef",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -300,7 +307,7 @@ void Test_arg_EigenConstRef_14(int nargout, mxArray *out[], int nargin, const mx
|
|||
obj->arg_EigenConstRef(value);
|
||||
}
|
||||
|
||||
void Test_create_MixedPtrs_15(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_create_MixedPtrs_16(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("create_MixedPtrs",nargout,nargin-1,0);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -309,7 +316,7 @@ void Test_create_MixedPtrs_15(int nargout, mxArray *out[], int nargin, const mxA
|
|||
out[1] = wrap_shared_ptr(pairResult.second,"Test", false);
|
||||
}
|
||||
|
||||
void Test_create_ptrs_16(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_create_ptrs_17(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("create_ptrs",nargout,nargin-1,0);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -318,28 +325,28 @@ void Test_create_ptrs_16(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[1] = wrap_shared_ptr(pairResult.second,"Test", false);
|
||||
}
|
||||
|
||||
void Test_get_container_17(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_get_container_18(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("get_container",nargout,nargin-1,0);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
out[0] = wrap_shared_ptr(boost::make_shared<std::vector<testing::Test>>(obj->get_container()),"std.vectorTest", false);
|
||||
}
|
||||
|
||||
void Test_lambda_18(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_lambda_19(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("lambda",nargout,nargin-1,0);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
obj->lambda();
|
||||
}
|
||||
|
||||
void Test_print_19(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_print_20(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("print",nargout,nargin-1,0);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
obj->print();
|
||||
}
|
||||
|
||||
void Test_return_Point2Ptr_20(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_Point2Ptr_21(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_Point2Ptr",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -350,7 +357,7 @@ void Test_return_Point2Ptr_20(int nargout, mxArray *out[], int nargin, const mxA
|
|||
}
|
||||
}
|
||||
|
||||
void Test_return_Test_21(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_Test_22(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_Test",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -358,7 +365,7 @@ void Test_return_Test_21(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[0] = wrap_shared_ptr(boost::make_shared<Test>(obj->return_Test(value)),"Test", false);
|
||||
}
|
||||
|
||||
void Test_return_TestPtr_22(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_TestPtr_23(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_TestPtr",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -366,7 +373,7 @@ void Test_return_TestPtr_22(int nargout, mxArray *out[], int nargin, const mxArr
|
|||
out[0] = wrap_shared_ptr(obj->return_TestPtr(value),"Test", false);
|
||||
}
|
||||
|
||||
void Test_return_bool_23(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_bool_24(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_bool",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -374,7 +381,7 @@ void Test_return_bool_23(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[0] = wrap< bool >(obj->return_bool(value));
|
||||
}
|
||||
|
||||
void Test_return_double_24(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_double_25(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_double",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -382,7 +389,7 @@ void Test_return_double_24(int nargout, mxArray *out[], int nargin, const mxArra
|
|||
out[0] = wrap< double >(obj->return_double(value));
|
||||
}
|
||||
|
||||
void Test_return_field_25(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_field_26(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_field",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -390,7 +397,7 @@ void Test_return_field_25(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[0] = wrap< bool >(obj->return_field(t));
|
||||
}
|
||||
|
||||
void Test_return_int_26(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_int_27(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_int",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -398,7 +405,7 @@ void Test_return_int_26(int nargout, mxArray *out[], int nargin, const mxArray *
|
|||
out[0] = wrap< int >(obj->return_int(value));
|
||||
}
|
||||
|
||||
void Test_return_matrix1_27(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_matrix1_28(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_matrix1",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -406,7 +413,7 @@ void Test_return_matrix1_27(int nargout, mxArray *out[], int nargin, const mxArr
|
|||
out[0] = wrap< Matrix >(obj->return_matrix1(value));
|
||||
}
|
||||
|
||||
void Test_return_matrix2_28(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_matrix2_29(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_matrix2",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -414,7 +421,7 @@ void Test_return_matrix2_28(int nargout, mxArray *out[], int nargin, const mxArr
|
|||
out[0] = wrap< Matrix >(obj->return_matrix2(value));
|
||||
}
|
||||
|
||||
void Test_return_pair_29(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_pair",nargout,nargin-1,2);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -425,7 +432,7 @@ void Test_return_pair_29(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[1] = wrap< Matrix >(pairResult.second);
|
||||
}
|
||||
|
||||
void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_pair_31(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_pair",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -435,7 +442,7 @@ void Test_return_pair_30(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[1] = wrap< Matrix >(pairResult.second);
|
||||
}
|
||||
|
||||
void Test_return_ptrs_31(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_ptrs_32(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_ptrs",nargout,nargin-1,2);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -446,7 +453,7 @@ void Test_return_ptrs_31(int nargout, mxArray *out[], int nargin, const mxArray
|
|||
out[1] = wrap_shared_ptr(pairResult.second,"Test", false);
|
||||
}
|
||||
|
||||
void Test_return_size_t_32(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_size_t_33(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_size_t",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -454,7 +461,7 @@ void Test_return_size_t_32(int nargout, mxArray *out[], int nargin, const mxArra
|
|||
out[0] = wrap< size_t >(obj->return_size_t(value));
|
||||
}
|
||||
|
||||
void Test_return_string_33(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_string_34(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_string",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -462,7 +469,7 @@ void Test_return_string_33(int nargout, mxArray *out[], int nargin, const mxArra
|
|||
out[0] = wrap< string >(obj->return_string(value));
|
||||
}
|
||||
|
||||
void Test_return_vector1_34(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_vector1_35(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_vector1",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -470,7 +477,7 @@ void Test_return_vector1_34(int nargout, mxArray *out[], int nargin, const mxArr
|
|||
out[0] = wrap< Vector >(obj->return_vector1(value));
|
||||
}
|
||||
|
||||
void Test_return_vector2_35(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_return_vector2_36(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("return_vector2",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
|
@ -478,14 +485,6 @@ void Test_return_vector2_35(int nargout, mxArray *out[], int nargin, const mxArr
|
|||
out[0] = wrap< Vector >(obj->return_vector2(value));
|
||||
}
|
||||
|
||||
void Test_set_container_36(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("set_container",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
boost::shared_ptr<std::vector<testing::Test>> container = unwrap_shared_ptr< std::vector<testing::Test> >(in[1], "ptr_stdvectorTest");
|
||||
obj->set_container(*container);
|
||||
}
|
||||
|
||||
void Test_set_container_37(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("set_container",nargout,nargin-1,1);
|
||||
|
@ -502,7 +501,15 @@ void Test_set_container_38(int nargout, mxArray *out[], int nargin, const mxArra
|
|||
obj->set_container(*container);
|
||||
}
|
||||
|
||||
void PrimitiveRefDouble_collectorInsertAndMakeBase_39(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void Test_set_container_39(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("set_container",nargout,nargin-1,1);
|
||||
auto obj = unwrap_shared_ptr<Test>(in[0], "ptr_Test");
|
||||
boost::shared_ptr<std::vector<testing::Test>> container = unwrap_shared_ptr< std::vector<testing::Test> >(in[1], "ptr_stdvectorTest");
|
||||
obj->set_container(*container);
|
||||
}
|
||||
|
||||
void PrimitiveRefDouble_collectorInsertAndMakeBase_40(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<PrimitiveRef<double>> Shared;
|
||||
|
@ -511,7 +518,7 @@ void PrimitiveRefDouble_collectorInsertAndMakeBase_39(int nargout, mxArray *out[
|
|||
collector_PrimitiveRefDouble.insert(self);
|
||||
}
|
||||
|
||||
void PrimitiveRefDouble_constructor_40(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void PrimitiveRefDouble_constructor_41(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<PrimitiveRef<double>> Shared;
|
||||
|
@ -522,7 +529,7 @@ void PrimitiveRefDouble_constructor_40(int nargout, mxArray *out[], int nargin,
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void PrimitiveRefDouble_deconstructor_41(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void PrimitiveRefDouble_deconstructor_42(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<PrimitiveRef<double>> Shared;
|
||||
checkArguments("delete_PrimitiveRefDouble",nargout,nargin,1);
|
||||
|
@ -535,14 +542,14 @@ void PrimitiveRefDouble_deconstructor_41(int nargout, mxArray *out[], int nargin
|
|||
}
|
||||
}
|
||||
|
||||
void PrimitiveRefDouble_Brutal_42(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void PrimitiveRefDouble_Brutal_43(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("PrimitiveRefDouble.Brutal",nargout,nargin,1);
|
||||
double t = unwrap< double >(in[0]);
|
||||
out[0] = wrap_shared_ptr(boost::make_shared<PrimitiveRef<double>>(PrimitiveRef<double>::Brutal(t)),"PrimitiveRefdouble", false);
|
||||
}
|
||||
|
||||
void MyVector3_collectorInsertAndMakeBase_43(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector3_collectorInsertAndMakeBase_44(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyVector<3>> Shared;
|
||||
|
@ -551,7 +558,7 @@ void MyVector3_collectorInsertAndMakeBase_43(int nargout, mxArray *out[], int na
|
|||
collector_MyVector3.insert(self);
|
||||
}
|
||||
|
||||
void MyVector3_constructor_44(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector3_constructor_45(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyVector<3>> Shared;
|
||||
|
@ -562,7 +569,7 @@ void MyVector3_constructor_44(int nargout, mxArray *out[], int nargin, const mxA
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void MyVector3_deconstructor_45(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector3_deconstructor_46(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<MyVector<3>> Shared;
|
||||
checkArguments("delete_MyVector3",nargout,nargin,1);
|
||||
|
@ -575,7 +582,7 @@ void MyVector3_deconstructor_45(int nargout, mxArray *out[], int nargin, const m
|
|||
}
|
||||
}
|
||||
|
||||
void MyVector12_collectorInsertAndMakeBase_46(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector12_collectorInsertAndMakeBase_47(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyVector<12>> Shared;
|
||||
|
@ -584,7 +591,7 @@ void MyVector12_collectorInsertAndMakeBase_46(int nargout, mxArray *out[], int n
|
|||
collector_MyVector12.insert(self);
|
||||
}
|
||||
|
||||
void MyVector12_constructor_47(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector12_constructor_48(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyVector<12>> Shared;
|
||||
|
@ -595,7 +602,7 @@ void MyVector12_constructor_47(int nargout, mxArray *out[], int nargin, const mx
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void MyVector12_deconstructor_48(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyVector12_deconstructor_49(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<MyVector<12>> Shared;
|
||||
checkArguments("delete_MyVector12",nargout,nargin,1);
|
||||
|
@ -608,7 +615,7 @@ void MyVector12_deconstructor_48(int nargout, mxArray *out[], int nargin, const
|
|||
}
|
||||
}
|
||||
|
||||
void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_50(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MultipleTemplates<int, double>> Shared;
|
||||
|
@ -617,7 +624,7 @@ void MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(int nargout, mxArr
|
|||
collector_MultipleTemplatesIntDouble.insert(self);
|
||||
}
|
||||
|
||||
void MultipleTemplatesIntDouble_deconstructor_50(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MultipleTemplatesIntDouble_deconstructor_51(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<MultipleTemplates<int, double>> Shared;
|
||||
checkArguments("delete_MultipleTemplatesIntDouble",nargout,nargin,1);
|
||||
|
@ -630,7 +637,7 @@ void MultipleTemplatesIntDouble_deconstructor_50(int nargout, mxArray *out[], in
|
|||
}
|
||||
}
|
||||
|
||||
void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_52(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MultipleTemplates<int, float>> Shared;
|
||||
|
@ -639,7 +646,7 @@ void MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(int nargout, mxArra
|
|||
collector_MultipleTemplatesIntFloat.insert(self);
|
||||
}
|
||||
|
||||
void MultipleTemplatesIntFloat_deconstructor_52(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MultipleTemplatesIntFloat_deconstructor_53(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<MultipleTemplates<int, float>> Shared;
|
||||
checkArguments("delete_MultipleTemplatesIntFloat",nargout,nargin,1);
|
||||
|
@ -652,7 +659,7 @@ void MultipleTemplatesIntFloat_deconstructor_52(int nargout, mxArray *out[], int
|
|||
}
|
||||
}
|
||||
|
||||
void ForwardKinematics_collectorInsertAndMakeBase_53(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void ForwardKinematics_collectorInsertAndMakeBase_54(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<ForwardKinematics> Shared;
|
||||
|
@ -661,7 +668,7 @@ void ForwardKinematics_collectorInsertAndMakeBase_53(int nargout, mxArray *out[]
|
|||
collector_ForwardKinematics.insert(self);
|
||||
}
|
||||
|
||||
void ForwardKinematics_constructor_54(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void ForwardKinematics_constructor_55(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<ForwardKinematics> Shared;
|
||||
|
@ -677,7 +684,7 @@ void ForwardKinematics_constructor_54(int nargout, mxArray *out[], int nargin, c
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void ForwardKinematics_deconstructor_55(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void ForwardKinematics_deconstructor_56(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<ForwardKinematics> Shared;
|
||||
checkArguments("delete_ForwardKinematics",nargout,nargin,1);
|
||||
|
@ -690,7 +697,7 @@ void ForwardKinematics_deconstructor_55(int nargout, mxArray *out[], int nargin,
|
|||
}
|
||||
}
|
||||
|
||||
void TemplatedConstructor_collectorInsertAndMakeBase_56(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_collectorInsertAndMakeBase_57(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
|
@ -699,7 +706,7 @@ void TemplatedConstructor_collectorInsertAndMakeBase_56(int nargout, mxArray *ou
|
|||
collector_TemplatedConstructor.insert(self);
|
||||
}
|
||||
|
||||
void TemplatedConstructor_constructor_57(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
|
@ -710,7 +717,7 @@ void TemplatedConstructor_constructor_57(int nargout, mxArray *out[], int nargin
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
|
@ -722,7 +729,7 @@ void TemplatedConstructor_constructor_58(int nargout, mxArray *out[], int nargin
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
|
@ -734,7 +741,7 @@ void TemplatedConstructor_constructor_59(int nargout, mxArray *out[], int nargin
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_constructor_61(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
|
@ -746,7 +753,7 @@ void TemplatedConstructor_constructor_60(int nargout, mxArray *out[], int nargin
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void TemplatedConstructor_deconstructor_61(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void TemplatedConstructor_deconstructor_62(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<TemplatedConstructor> Shared;
|
||||
checkArguments("delete_TemplatedConstructor",nargout,nargin,1);
|
||||
|
@ -759,7 +766,7 @@ void TemplatedConstructor_deconstructor_61(int nargout, mxArray *out[], int narg
|
|||
}
|
||||
}
|
||||
|
||||
void MyFactorPosePoint2_collectorInsertAndMakeBase_62(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyFactorPosePoint2_collectorInsertAndMakeBase_63(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyFactor<gtsam::Pose2, gtsam::Matrix>> Shared;
|
||||
|
@ -768,7 +775,7 @@ void MyFactorPosePoint2_collectorInsertAndMakeBase_62(int nargout, mxArray *out[
|
|||
collector_MyFactorPosePoint2.insert(self);
|
||||
}
|
||||
|
||||
void MyFactorPosePoint2_constructor_63(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyFactorPosePoint2_constructor_64(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
mexAtExit(&_deleteAllObjects);
|
||||
typedef boost::shared_ptr<MyFactor<gtsam::Pose2, gtsam::Matrix>> Shared;
|
||||
|
@ -783,7 +790,7 @@ void MyFactorPosePoint2_constructor_63(int nargout, mxArray *out[], int nargin,
|
|||
*reinterpret_cast<Shared**> (mxGetData(out[0])) = self;
|
||||
}
|
||||
|
||||
void MyFactorPosePoint2_deconstructor_64(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyFactorPosePoint2_deconstructor_65(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
typedef boost::shared_ptr<MyFactor<gtsam::Pose2, gtsam::Matrix>> Shared;
|
||||
checkArguments("delete_MyFactorPosePoint2",nargout,nargin,1);
|
||||
|
@ -796,7 +803,7 @@ void MyFactorPosePoint2_deconstructor_64(int nargout, mxArray *out[], int nargin
|
|||
}
|
||||
}
|
||||
|
||||
void MyFactorPosePoint2_print_65(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
void MyFactorPosePoint2_print_66(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
||||
{
|
||||
checkArguments("print",nargout,nargin-1,2);
|
||||
auto obj = unwrap_shared_ptr<MyFactor<gtsam::Pose2, gtsam::Matrix>>(in[0], "ptr_MyFactorPosePoint2");
|
||||
|
@ -848,85 +855,85 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
|||
FunDouble_staticMethodWithThis_9(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 10:
|
||||
Test_collectorInsertAndMakeBase_10(nargout, out, nargin-1, in+1);
|
||||
FunDouble_templatedStaticMethodInt_10(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 11:
|
||||
Test_constructor_11(nargout, out, nargin-1, in+1);
|
||||
Test_collectorInsertAndMakeBase_11(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 12:
|
||||
Test_constructor_12(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 13:
|
||||
Test_deconstructor_13(nargout, out, nargin-1, in+1);
|
||||
Test_constructor_13(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 14:
|
||||
Test_arg_EigenConstRef_14(nargout, out, nargin-1, in+1);
|
||||
Test_deconstructor_14(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 15:
|
||||
Test_create_MixedPtrs_15(nargout, out, nargin-1, in+1);
|
||||
Test_arg_EigenConstRef_15(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 16:
|
||||
Test_create_ptrs_16(nargout, out, nargin-1, in+1);
|
||||
Test_create_MixedPtrs_16(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 17:
|
||||
Test_get_container_17(nargout, out, nargin-1, in+1);
|
||||
Test_create_ptrs_17(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 18:
|
||||
Test_lambda_18(nargout, out, nargin-1, in+1);
|
||||
Test_get_container_18(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 19:
|
||||
Test_print_19(nargout, out, nargin-1, in+1);
|
||||
Test_lambda_19(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 20:
|
||||
Test_return_Point2Ptr_20(nargout, out, nargin-1, in+1);
|
||||
Test_print_20(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 21:
|
||||
Test_return_Test_21(nargout, out, nargin-1, in+1);
|
||||
Test_return_Point2Ptr_21(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 22:
|
||||
Test_return_TestPtr_22(nargout, out, nargin-1, in+1);
|
||||
Test_return_Test_22(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 23:
|
||||
Test_return_bool_23(nargout, out, nargin-1, in+1);
|
||||
Test_return_TestPtr_23(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 24:
|
||||
Test_return_double_24(nargout, out, nargin-1, in+1);
|
||||
Test_return_bool_24(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 25:
|
||||
Test_return_field_25(nargout, out, nargin-1, in+1);
|
||||
Test_return_double_25(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 26:
|
||||
Test_return_int_26(nargout, out, nargin-1, in+1);
|
||||
Test_return_field_26(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 27:
|
||||
Test_return_matrix1_27(nargout, out, nargin-1, in+1);
|
||||
Test_return_int_27(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 28:
|
||||
Test_return_matrix2_28(nargout, out, nargin-1, in+1);
|
||||
Test_return_matrix1_28(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 29:
|
||||
Test_return_pair_29(nargout, out, nargin-1, in+1);
|
||||
Test_return_matrix2_29(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 30:
|
||||
Test_return_pair_30(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 31:
|
||||
Test_return_ptrs_31(nargout, out, nargin-1, in+1);
|
||||
Test_return_pair_31(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 32:
|
||||
Test_return_size_t_32(nargout, out, nargin-1, in+1);
|
||||
Test_return_ptrs_32(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 33:
|
||||
Test_return_string_33(nargout, out, nargin-1, in+1);
|
||||
Test_return_size_t_33(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 34:
|
||||
Test_return_vector1_34(nargout, out, nargin-1, in+1);
|
||||
Test_return_string_34(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 35:
|
||||
Test_return_vector2_35(nargout, out, nargin-1, in+1);
|
||||
Test_return_vector1_35(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 36:
|
||||
Test_set_container_36(nargout, out, nargin-1, in+1);
|
||||
Test_return_vector2_36(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 37:
|
||||
Test_set_container_37(nargout, out, nargin-1, in+1);
|
||||
|
@ -935,61 +942,61 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
|||
Test_set_container_38(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 39:
|
||||
PrimitiveRefDouble_collectorInsertAndMakeBase_39(nargout, out, nargin-1, in+1);
|
||||
Test_set_container_39(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 40:
|
||||
PrimitiveRefDouble_constructor_40(nargout, out, nargin-1, in+1);
|
||||
PrimitiveRefDouble_collectorInsertAndMakeBase_40(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 41:
|
||||
PrimitiveRefDouble_deconstructor_41(nargout, out, nargin-1, in+1);
|
||||
PrimitiveRefDouble_constructor_41(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 42:
|
||||
PrimitiveRefDouble_Brutal_42(nargout, out, nargin-1, in+1);
|
||||
PrimitiveRefDouble_deconstructor_42(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 43:
|
||||
MyVector3_collectorInsertAndMakeBase_43(nargout, out, nargin-1, in+1);
|
||||
PrimitiveRefDouble_Brutal_43(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 44:
|
||||
MyVector3_constructor_44(nargout, out, nargin-1, in+1);
|
||||
MyVector3_collectorInsertAndMakeBase_44(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 45:
|
||||
MyVector3_deconstructor_45(nargout, out, nargin-1, in+1);
|
||||
MyVector3_constructor_45(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 46:
|
||||
MyVector12_collectorInsertAndMakeBase_46(nargout, out, nargin-1, in+1);
|
||||
MyVector3_deconstructor_46(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 47:
|
||||
MyVector12_constructor_47(nargout, out, nargin-1, in+1);
|
||||
MyVector12_collectorInsertAndMakeBase_47(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 48:
|
||||
MyVector12_deconstructor_48(nargout, out, nargin-1, in+1);
|
||||
MyVector12_constructor_48(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 49:
|
||||
MultipleTemplatesIntDouble_collectorInsertAndMakeBase_49(nargout, out, nargin-1, in+1);
|
||||
MyVector12_deconstructor_49(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 50:
|
||||
MultipleTemplatesIntDouble_deconstructor_50(nargout, out, nargin-1, in+1);
|
||||
MultipleTemplatesIntDouble_collectorInsertAndMakeBase_50(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 51:
|
||||
MultipleTemplatesIntFloat_collectorInsertAndMakeBase_51(nargout, out, nargin-1, in+1);
|
||||
MultipleTemplatesIntDouble_deconstructor_51(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 52:
|
||||
MultipleTemplatesIntFloat_deconstructor_52(nargout, out, nargin-1, in+1);
|
||||
MultipleTemplatesIntFloat_collectorInsertAndMakeBase_52(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 53:
|
||||
ForwardKinematics_collectorInsertAndMakeBase_53(nargout, out, nargin-1, in+1);
|
||||
MultipleTemplatesIntFloat_deconstructor_53(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 54:
|
||||
ForwardKinematics_constructor_54(nargout, out, nargin-1, in+1);
|
||||
ForwardKinematics_collectorInsertAndMakeBase_54(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 55:
|
||||
ForwardKinematics_deconstructor_55(nargout, out, nargin-1, in+1);
|
||||
ForwardKinematics_constructor_55(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 56:
|
||||
TemplatedConstructor_collectorInsertAndMakeBase_56(nargout, out, nargin-1, in+1);
|
||||
ForwardKinematics_deconstructor_56(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 57:
|
||||
TemplatedConstructor_constructor_57(nargout, out, nargin-1, in+1);
|
||||
TemplatedConstructor_collectorInsertAndMakeBase_57(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 58:
|
||||
TemplatedConstructor_constructor_58(nargout, out, nargin-1, in+1);
|
||||
|
@ -1001,19 +1008,22 @@ void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
|
|||
TemplatedConstructor_constructor_60(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 61:
|
||||
TemplatedConstructor_deconstructor_61(nargout, out, nargin-1, in+1);
|
||||
TemplatedConstructor_constructor_61(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 62:
|
||||
MyFactorPosePoint2_collectorInsertAndMakeBase_62(nargout, out, nargin-1, in+1);
|
||||
TemplatedConstructor_deconstructor_62(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 63:
|
||||
MyFactorPosePoint2_constructor_63(nargout, out, nargin-1, in+1);
|
||||
MyFactorPosePoint2_collectorInsertAndMakeBase_63(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 64:
|
||||
MyFactorPosePoint2_deconstructor_64(nargout, out, nargin-1, in+1);
|
||||
MyFactorPosePoint2_constructor_64(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 65:
|
||||
MyFactorPosePoint2_print_65(nargout, out, nargin-1, in+1);
|
||||
MyFactorPosePoint2_deconstructor_65(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
case 66:
|
||||
MyFactorPosePoint2_print_66(nargout, out, nargin-1, in+1);
|
||||
break;
|
||||
}
|
||||
} catch(const std::exception& e) {
|
||||
@ -31,7 +31,8 @@ PYBIND11_MODULE(class_py, m_) {
py::class_<Fun<double>, std::shared_ptr<Fun<double>>>(m_, "FunDouble")
.def("templatedMethodString",[](Fun<double>* self, double d, string t){return self->templatedMethod<string>(d, t);}, py::arg("d"), py::arg("t"))
.def("multiTemplatedMethodStringSize_t",[](Fun<double>* self, double d, string t, size_t u){return self->multiTemplatedMethod<string,size_t>(d, t, u);}, py::arg("d"), py::arg("t"), py::arg("u"))
.def_static("staticMethodWithThis",[](){return Fun<double>::staticMethodWithThis();});
.def_static("staticMethodWithThis",[](){return Fun<double>::staticMethodWithThis();})
.def_static("templatedStaticMethodInt",[](const int& m){return Fun<double>::templatedStaticMethod<int>(m);}, py::arg("m"));

py::class_<Test, std::shared_ptr<Test>>(m_, "Test")
.def(py::init<>())
@ -10,6 +10,9 @@ class Fun {

static This staticMethodWithThis();

template<T={int}>
static double templatedStaticMethod(const T& m);

template<T={string}>
This templatedMethod(double d, T t);
@ -18,11 +18,13 @@ import unittest

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from gtwrap.interface_parser import (
ArgumentList, Class, Constructor, Enum, Enumerator, ForwardDeclaration,
GlobalFunction, Include, Method, Module, Namespace, Operator, ReturnType,
StaticMethod, TemplatedType, Type, TypedefTemplateInstantiation, Typename,
Variable)
from gtwrap.interface_parser import (ArgumentList, Class, Constructor, Enum,
Enumerator, ForwardDeclaration,
GlobalFunction, Include, Method, Module,
Namespace, Operator, ReturnType,
StaticMethod, TemplatedType, Type,
TypedefTemplateInstantiation, Typename,
Variable)


class TestInterfaceParser(unittest.TestCase):
@ -266,6 +268,11 @@ class TestInterfaceParser(unittest.TestCase):
self.assertEqual("char", return_type.type1.typename.name)
self.assertEqual("int", return_type.type2.typename.name)

return_type = ReturnType.rule.parseString("pair<Test ,Test*>")[0]
self.assertEqual("Test", return_type.type1.typename.name)
self.assertEqual("Test", return_type.type2.typename.name)
self.assertTrue(return_type.type2.is_shared_ptr)

def test_method(self):
"""Test for a class method."""
ret = Method.rule.parseString("int f();")[0]
@ -283,6 +290,13 @@ class TestInterfaceParser(unittest.TestCase):
self.assertEqual("f", ret.name)
self.assertEqual(3, len(ret.args))

ret = Method.rule.parseString(
"pair<First ,Second*> create_MixedPtrs();")[0]
self.assertEqual("create_MixedPtrs", ret.name)
self.assertEqual(0, len(ret.args))
self.assertEqual("First", ret.return_type.type1.typename.name)
self.assertEqual("Second", ret.return_type.type2.typename.name)

def test_static_method(self):
"""Test for static methods."""
ret = StaticMethod.rule.parseString("static int f();")[0]
@ -42,17 +42,16 @@ class TestWrap(unittest.TestCase):
# Create the `actual/matlab` directory
os.makedirs(self.MATLAB_ACTUAL_DIR, exist_ok=True)

def compare_and_diff(self, file):
def compare_and_diff(self, file, actual):
"""
Compute the comparison between the expected and actual file,
and assert if diff is zero.
"""
output = osp.join(self.MATLAB_ACTUAL_DIR, file)
expected = osp.join(self.MATLAB_TEST_DIR, file)
success = filecmp.cmp(output, expected)
success = filecmp.cmp(actual, expected)

if not success:
print("Differ in file: {}".format(file))
os.system("diff {} {}".format(output, expected))
os.system("diff {} {}".format(actual, expected))
self.assertTrue(success, "Mismatch for file {0}".format(file))

def test_geometry(self):
@ -77,7 +76,8 @@ class TestWrap(unittest.TestCase):
self.assertTrue(osp.isdir(osp.join(self.MATLAB_ACTUAL_DIR, '+gtsam')))

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_functions(self):
"""Test interface file with function info."""
@ -99,7 +99,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_class(self):
"""Test interface file with only class info."""
@ -121,7 +122,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_templates(self):
"""Test interface file with template info."""
@ -138,7 +140,8 @@ class TestWrap(unittest.TestCase):
files = ['template_wrapper.cpp']

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_inheritance(self):
"""Test interface file with class inheritance definitions."""
@ -157,7 +160,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_namespaces(self):
"""
@ -181,7 +185,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_special_cases(self):
"""
@ -203,7 +208,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)

def test_multiple_files(self):
"""
@ -228,7 +234,8 @@ class TestWrap(unittest.TestCase):
]

for file in files:
self.compare_and_diff(file)
actual = osp.join(self.MATLAB_ACTUAL_DIR, file)
self.compare_and_diff(file, actual)


if __name__ == '__main__':
@ -0,0 +1,606 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Tests for template_instantiator.
|
||||
|
||||
Author: Varun Agrawal
|
||||
"""
|
||||
|
||||
# pylint: disable=import-error,wrong-import-position
|
||||
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from gtwrap.interface_parser import (Argument, ArgumentList, Class,
|
||||
Constructor, ForwardDeclaration,
|
||||
GlobalFunction, Include, Method,
|
||||
Namespace, ReturnType, StaticMethod,
|
||||
Typename)
|
||||
from gtwrap.template_instantiator import (
|
||||
InstantiatedClass, InstantiatedConstructor, InstantiatedDeclaration,
|
||||
InstantiatedGlobalFunction, InstantiatedMethod, InstantiatedStaticMethod,
|
||||
InstantiationHelper, instantiate_args_list, instantiate_name,
|
||||
instantiate_namespace, instantiate_return_type, instantiate_type,
|
||||
is_scoped_template)
|
||||
|
||||
|
||||
class TestInstantiationHelper(unittest.TestCase):
|
||||
"""Tests for the InstantiationHelper class."""
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
helper = InstantiationHelper(InstantiatedClass)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedClass)
|
||||
helper = InstantiationHelper(InstantiatedConstructor)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedConstructor)
|
||||
helper = InstantiationHelper(InstantiatedDeclaration)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedDeclaration)
|
||||
helper = InstantiationHelper(InstantiatedGlobalFunction)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedGlobalFunction)
|
||||
helper = InstantiationHelper(InstantiatedMethod)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedMethod)
|
||||
helper = InstantiationHelper(InstantiatedStaticMethod)
|
||||
self.assertEqual(helper.instantiation_type, InstantiatedStaticMethod)
|
||||
|
||||
def test_instantiate(self):
|
||||
"""Test instantiate method."""
|
||||
method = Method.rule.parseString("""
|
||||
template<U={double}>
|
||||
double method(const T x, const U& param);
|
||||
""")[0]
|
||||
cls = Class.rule.parseString("""
|
||||
template<T={string}>
|
||||
class Foo {};
|
||||
""")[0]
|
||||
typenames = ['T', 'U']
|
||||
class_instantiations = [Typename.rule.parseString("string")[0]]
|
||||
method_instantiations = [Typename.rule.parseString("double")[0]]
|
||||
|
||||
parent = InstantiatedClass(cls, class_instantiations)
|
||||
|
||||
helper = InstantiationHelper(InstantiatedMethod)
|
||||
instantiated_methods = helper.instantiate([], method, typenames,
|
||||
class_instantiations,
|
||||
method_instantiations,
|
||||
parent)
|
||||
|
||||
self.assertEqual(len(instantiated_methods), 1)
|
||||
args_list = instantiated_methods[0].args.list()
|
||||
self.assertEqual(args_list[0].ctype.get_typename(), 'string')
|
||||
self.assertEqual(args_list[1].ctype.get_typename(), 'double')
|
||||
|
||||
def test_multilevel_instantiation(self):
|
||||
"""
|
||||
Test method for multilevel instantiation
|
||||
i.e. instantiation at both the class and method level.
|
||||
"""
|
||||
cls = Class.rule.parseString("""
|
||||
template<T={string}>
|
||||
class Foo {
|
||||
template<U={double}>
|
||||
double method1(const T x, const U& param);
|
||||
|
||||
template<V={int}>
|
||||
V method2(const T x);
|
||||
};
|
||||
""")[0]
|
||||
|
||||
typenames = ['T']
|
||||
class_instantiations = [Typename.rule.parseString("string")[0]]
|
||||
parent = InstantiatedClass(cls, class_instantiations)
|
||||
|
||||
helper = InstantiationHelper(InstantiatedMethod)
|
||||
|
||||
instantiated_methods = helper.multilevel_instantiation(
|
||||
cls.methods, typenames, parent)
|
||||
self.assertEqual(len(instantiated_methods), 2)
|
||||
self.assertEqual(
|
||||
instantiated_methods[0].args.list()[0].ctype.get_typename(),
|
||||
'string')
|
||||
self.assertEqual(
|
||||
instantiated_methods[0].args.list()[1].ctype.get_typename(),
|
||||
'double')
|
||||
self.assertEqual(
|
||||
instantiated_methods[1].args.list()[0].ctype.get_typename(),
|
||||
'string')
|
||||
self.assertEqual(
|
||||
instantiated_methods[1].return_type.type1.get_typename(), 'int')
|
||||
|
||||
|
||||
class TestInstantiatedGlobalFunction(unittest.TestCase):
|
||||
"""Tests for the InstantiatedGlobalFunction class."""
|
||||
def setUp(self):
|
||||
original = GlobalFunction.rule.parseString("""
|
||||
template<T={int}, R={double}>
|
||||
R function(const T& x);
|
||||
""")[0]
|
||||
instantiations = [
|
||||
Typename.rule.parseString("int")[0],
|
||||
Typename.rule.parseString("double")[0]
|
||||
]
|
||||
self.func = InstantiatedGlobalFunction(original, instantiations)
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.func, InstantiatedGlobalFunction)
|
||||
self.assertIsInstance(self.func.original, GlobalFunction)
|
||||
self.assertEqual(self.func.name, "functionIntDouble")
|
||||
self.assertEqual(len(self.func.args.list()), 1)
|
||||
self.assertEqual(self.func.args.list()[0].ctype.get_typename(), "int")
|
||||
self.assertEqual(self.func.return_type.type1.get_typename(), "double")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test to_cpp method."""
|
||||
actual = self.func.to_cpp()
|
||||
self.assertEqual(actual, "function<int,double>")
|
||||
|
||||
|
||||
class TestInstantiatedConstructor(unittest.TestCase):
|
||||
"""Tests for the InstantiatedConstructor class."""
|
||||
def setUp(self):
|
||||
constructor = Constructor.rule.parseString("""
|
||||
template<U={double}>
|
||||
Class(C x, const U& param);
|
||||
""")[0]
|
||||
instantiations = [
|
||||
Typename.rule.parseString("double")[0],
|
||||
Typename.rule.parseString("string")[0]
|
||||
]
|
||||
self.constructor = InstantiatedConstructor(constructor, instantiations)
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.constructor, InstantiatedConstructor)
|
||||
self.assertIsInstance(self.constructor.original, Constructor)
|
||||
|
||||
def test_construct(self):
|
||||
"""Test the construct classmethod."""
|
||||
constructor = Constructor.rule.parseString("""
|
||||
template<U={double}>
|
||||
Class(C x, const U& param);
|
||||
""")[0]
|
||||
c = Class.rule.parseString("""
|
||||
template<C={string}>
|
||||
class Class {};
|
||||
""")[0]
|
||||
class_instantiations = [Typename.rule.parseString("double")[0]]
|
||||
method_instantiations = [Typename.rule.parseString("string")[0]]
|
||||
typenames = ['C', 'U']
|
||||
parent = InstantiatedClass(c, class_instantiations)
|
||||
instantiated_args = instantiate_args_list(
|
||||
constructor.args.list(),
|
||||
typenames, class_instantiations + method_instantiations,
|
||||
parent.cpp_typename())
|
||||
|
||||
instantiated_constructor = InstantiatedConstructor.construct(
|
||||
constructor, typenames, class_instantiations,
|
||||
method_instantiations, instantiated_args, parent)
|
||||
self.assertEqual(instantiated_constructor.name, "ClassDouble")
|
||||
self.assertEqual(
|
||||
instantiated_constructor.args.list()[0].ctype.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(
|
||||
instantiated_constructor.args.list()[1].ctype.get_typename(),
|
||||
"string")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test the to_cpp method."""
|
||||
actual = self.constructor.to_cpp()
|
||||
self.assertEqual(actual, "Class<double,string>")
|
||||
|
||||
|
||||
class TestInstantiatedMethod(unittest.TestCase):
|
||||
"""Tests for the InstantiatedMethod class."""
|
||||
def setUp(self):
|
||||
method = Method.rule.parseString("""
|
||||
template<U={double}>
|
||||
double method(const U& param);
|
||||
""")[0]
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
self.method = InstantiatedMethod(method, instantiations)
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.method, InstantiatedMethod)
|
||||
self.assertIsInstance(self.method.original, Method)
|
||||
self.assertEqual(self.method.name, "methodDouble")
|
||||
|
||||
def test_construct(self):
|
||||
"""Test the construct classmethod."""
|
||||
method = Method.rule.parseString("""
|
||||
template<U={double}>
|
||||
T method(U& param);
|
||||
""")[0]
|
||||
method_instantiations = [Typename.rule.parseString("double")[0]]
|
||||
c = Class.rule.parseString("""
|
||||
template<T={string}>
|
||||
class Class {};
|
||||
""")[0]
|
||||
class_instantiations = [Typename.rule.parseString("string")[0]]
|
||||
|
||||
typenames = ['T', 'U']
|
||||
parent = InstantiatedClass(c, class_instantiations)
|
||||
instantiated_args = instantiate_args_list(
|
||||
method.args.list(),
|
||||
typenames, class_instantiations + method_instantiations,
|
||||
parent.cpp_typename())
|
||||
|
||||
instantiated_method = InstantiatedMethod.construct(
|
||||
method, typenames, class_instantiations, method_instantiations,
|
||||
instantiated_args, parent)
|
||||
self.assertEqual(instantiated_method.name, "methodDouble")
|
||||
self.assertEqual(
|
||||
instantiated_method.args.list()[0].ctype.get_typename(), "double")
|
||||
self.assertEqual(instantiated_method.return_type.type1.get_typename(),
|
||||
"string")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test the to_cpp method."""
|
||||
actual = self.method.to_cpp()
|
||||
self.assertEqual(actual, "method<double>")
|
||||
|
||||
|
||||
class TestInstantiatedStaticMethod(unittest.TestCase):
|
||||
"""Tests for the InstantiatedStaticMethod class."""
|
||||
def setUp(self):
|
||||
static_method = StaticMethod.rule.parseString("""
|
||||
template<U={double}>
|
||||
static T staticMethod(const U& param);
|
||||
""")[0]
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
self.static_method = InstantiatedStaticMethod(static_method,
|
||||
instantiations)
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.static_method, InstantiatedStaticMethod)
|
||||
self.assertIsInstance(self.static_method.original, StaticMethod)
|
||||
self.assertEqual(self.static_method.name, "staticMethodDouble")
|
||||
|
||||
def test_construct(self):
|
||||
"""Test the construct classmethod."""
|
||||
static_method = StaticMethod.rule.parseString("""
|
||||
template<U={double}>
|
||||
static T staticMethod(U& param);
|
||||
""")[0]
|
||||
method_instantiations = [Typename.rule.parseString("double")[0]]
|
||||
c = Class.rule.parseString("""
|
||||
template<T={string}>
|
||||
class Class {};
|
||||
""")[0]
|
||||
class_instantiations = [Typename.rule.parseString("string")[0]]
|
||||
|
||||
typenames = ['T', 'U']
|
||||
parent = InstantiatedClass(c, class_instantiations)
|
||||
instantiated_args = instantiate_args_list(
|
||||
static_method.args.list(),
|
||||
typenames, class_instantiations + method_instantiations,
|
||||
parent.cpp_typename())
|
||||
|
||||
instantiated_static_method = InstantiatedStaticMethod.construct(
|
||||
static_method, typenames, class_instantiations,
|
||||
method_instantiations, instantiated_args, parent)
|
||||
self.assertEqual(instantiated_static_method.name, "staticMethodDouble")
|
||||
self.assertEqual(
|
||||
instantiated_static_method.args.list()[0].ctype.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(
|
||||
instantiated_static_method.return_type.type1.get_typename(),
|
||||
"string")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test the to_cpp method."""
|
||||
actual = self.static_method.to_cpp()
|
||||
self.assertEqual(actual, "staticMethod<double>")
|
||||
|
||||
|
||||
class TestInstantiatedClass(unittest.TestCase):
|
||||
"""Tests for the InstantiatedClass class."""
|
||||
def setUp(self):
|
||||
cl = Class.rule.parseString("""
|
||||
template<T={string}>
|
||||
class Foo {
|
||||
template<C={int}>
|
||||
Foo(C& c);
|
||||
|
||||
template<S={char}>
|
||||
static T staticMethod(const S& s);
|
||||
|
||||
template<M={double}>
|
||||
T method(const M& m);
|
||||
|
||||
T operator*(T other) const;
|
||||
|
||||
T prop;
|
||||
};
|
||||
""")[0]
|
||||
class_instantiations = [Typename.rule.parseString('string')[0]]
|
||||
self.member_instantiations = [
|
||||
Typename.rule.parseString('int')[0],
|
||||
Typename.rule.parseString('char')[0],
|
||||
Typename.rule.parseString('double')[0],
|
||||
]
|
||||
self.cl = InstantiatedClass(cl, class_instantiations)
|
||||
self.typenames = self.cl.original.template.typenames
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.cl, InstantiatedClass)
|
||||
self.assertIsInstance(self.cl.original, Class)
|
||||
self.assertEqual(self.cl.name, "FooString")
|
||||
|
||||
def test_instantiate_ctors(self):
|
||||
"""Test instantiate_ctors method."""
|
||||
ctors = self.cl.instantiate_ctors(self.typenames)
|
||||
self.assertEqual(len(ctors), 1)
|
||||
self.assertEqual(ctors[0].name, "FooString")
|
||||
self.assertEqual(ctors[0].args.list()[0].ctype.get_typename(), "int")
|
||||
|
||||
def test_instantiate_static_methods(self):
|
||||
"""Test instantiate_static_methods method."""
|
||||
static_methods = self.cl.instantiate_static_methods(self.typenames)
|
||||
self.assertEqual(len(static_methods), 1)
|
||||
self.assertEqual(static_methods[0].name, "staticMethodChar")
|
||||
self.assertEqual(static_methods[0].args.list()[0].ctype.get_typename(),
|
||||
"char")
|
||||
self.assertEqual(static_methods[0].return_type.type1.get_typename(),
|
||||
"string")
|
||||
|
||||
def test_instantiate_methods(self):
|
||||
"""Test instantiate_methods method."""
|
||||
methods = self.cl.instantiate_methods(self.typenames)
|
||||
self.assertEqual(len(methods), 1)
|
||||
self.assertEqual(methods[0].name, "methodDouble")
|
||||
self.assertEqual(methods[0].args.list()[0].ctype.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(methods[0].return_type.type1.get_typename(), "string")
|
||||
|
||||
def test_instantiate_operators(self):
|
||||
"""Test instantiate_operators method."""
|
||||
operators = self.cl.instantiate_operators(self.typenames)
|
||||
self.assertEqual(len(operators), 1)
|
||||
self.assertEqual(operators[0].operator, "*")
|
||||
self.assertEqual(operators[0].args.list()[0].ctype.get_typename(),
|
||||
"string")
|
||||
self.assertEqual(operators[0].return_type.type1.get_typename(),
|
||||
"string")
|
||||
|
||||
def test_instantiate_properties(self):
|
||||
"""Test instantiate_properties method."""
|
||||
properties = self.cl.instantiate_properties(self.typenames)
|
||||
self.assertEqual(len(properties), 1)
|
||||
self.assertEqual(properties[0].name, "prop")
|
||||
self.assertEqual(properties[0].ctype.get_typename(), "string")
|
||||
|
||||
def test_cpp_typename(self):
|
||||
"""Test cpp_typename method."""
|
||||
actual = self.cl.cpp_typename()
|
||||
self.assertEqual(actual.name, "Foo<string>")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test to_cpp method."""
|
||||
actual = self.cl.to_cpp()
|
||||
self.assertEqual(actual, "Foo<string>")
|
||||
|
||||
|
||||
class TestInstantiatedDeclaration(unittest.TestCase):
|
||||
"""Tests for the InstantiatedDeclaration class."""
|
||||
def setUp(self):
|
||||
#TODO(Varun) Need to support templated class forward declaration.
|
||||
forward_declaration = ForwardDeclaration.rule.parseString("""
|
||||
class FooBar;
|
||||
""")[0]
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
self.declaration = InstantiatedDeclaration(
|
||||
forward_declaration, instantiations=instantiations)
|
||||
|
||||
def test_constructor(self):
|
||||
"""Test constructor."""
|
||||
self.assertIsInstance(self.declaration, InstantiatedDeclaration)
|
||||
self.assertIsInstance(self.declaration.original, ForwardDeclaration)
|
||||
self.assertEqual(self.declaration.instantiations[0].name, "double")
|
||||
|
||||
def test_to_cpp(self):
|
||||
"""Test to_cpp method."""
|
||||
cpp = self.declaration.to_cpp()
|
||||
self.assertEqual(cpp, "FooBar<double>")
|
||||
|
||||
|
||||
class TestTemplateInstantiator(unittest.TestCase):
|
||||
"""
|
||||
Test overall template instantiation and the functions in the module.
|
||||
"""
|
||||
def test_scoped_template(self):
|
||||
"""Test is_scoped_template."""
|
||||
# Test if not scoped template.
|
||||
template_typenames = ['T']
|
||||
str_arg_typename = "double"
|
||||
scoped_template, scoped_idx = is_scoped_template(
|
||||
template_typenames, str_arg_typename)
|
||||
self.assertFalse(scoped_template)
|
||||
self.assertEqual(scoped_idx, -1)
|
||||
|
||||
# Check for correct template match.
|
||||
template_typenames = ['T']
|
||||
str_arg_typename = "gtsam::Matrix"
|
||||
scoped_template, scoped_idx = is_scoped_template(
|
||||
template_typenames, str_arg_typename)
|
||||
self.assertFalse(scoped_template)
|
||||
self.assertEqual(scoped_idx, -1)
|
||||
|
||||
# Test scoped template.
|
||||
template_typenames = ['T']
|
||||
str_arg_typename = "T::Value"
|
||||
scoped_template, scoped_idx = is_scoped_template(
|
||||
template_typenames, str_arg_typename)
|
||||
self.assertEqual(scoped_template, "T")
|
||||
self.assertEqual(scoped_idx, 0)
|
||||
|
||||
template_typenames = ['U', 'T']
|
||||
str_arg_typename = "T::Value"
|
||||
scoped_template, scoped_idx = is_scoped_template(
|
||||
template_typenames, str_arg_typename)
|
||||
self.assertEqual(scoped_template, "T")
|
||||
self.assertEqual(scoped_idx, 1)
|
||||
|
||||
def test_instantiate_type(self):
|
||||
"""Test for instantiate_type."""
|
||||
arg = Argument.rule.parseString("const T x")[0]
|
||||
template_typenames = ["T"]
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
cpp_typename = "ExampleClass"
|
||||
new_type = instantiate_type(arg.ctype,
|
||||
template_typenames,
|
||||
instantiations=instantiations,
|
||||
cpp_typename=cpp_typename,
|
||||
instantiated_class=None)
|
||||
|
||||
new_typename = new_type.typename
|
||||
self.assertEqual(new_typename.name, "double")
|
||||
self.assertEqual(new_typename.instantiated_name(), "double")
|
||||
|
||||
def test_instantiate_args_list(self):
|
||||
"""Test for instantiate_args_list."""
|
||||
args = ArgumentList.rule.parseString("T x, double y, string z")[0]
|
||||
args_list = args.list()
|
||||
template_typenames = ['T']
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
instantiated_args_list = instantiate_args_list(
|
||||
args_list,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename="ExampleClass")
|
||||
|
||||
self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
|
||||
"double")
|
||||
|
||||
args = ArgumentList.rule.parseString("T x, U y, string z")[0]
|
||||
args_list = args.list()
|
||||
template_typenames = ['T', 'U']
|
||||
instantiations = [
|
||||
Typename.rule.parseString("double")[0],
|
||||
Typename.rule.parseString("Matrix")[0]
|
||||
]
|
||||
instantiated_args_list = instantiate_args_list(
|
||||
args_list,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename="ExampleClass")
|
||||
self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(instantiated_args_list[1].ctype.get_typename(),
|
||||
"Matrix")
|
||||
|
||||
args = ArgumentList.rule.parseString("T x, U y, T z")[0]
|
||||
args_list = args.list()
|
||||
template_typenames = ['T', 'U']
|
||||
instantiations = [
|
||||
Typename.rule.parseString("double")[0],
|
||||
Typename.rule.parseString("Matrix")[0]
|
||||
]
|
||||
instantiated_args_list = instantiate_args_list(
|
||||
args_list,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename="ExampleClass")
|
||||
self.assertEqual(instantiated_args_list[0].ctype.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(instantiated_args_list[1].ctype.get_typename(),
|
||||
"Matrix")
|
||||
self.assertEqual(instantiated_args_list[2].ctype.get_typename(),
|
||||
"double")
|
||||
|
||||
def test_instantiate_return_type(self):
|
||||
"""Test for instantiate_return_type."""
|
||||
return_type = ReturnType.rule.parseString("T")[0]
|
||||
template_typenames = ['T']
|
||||
instantiations = [Typename.rule.parseString("double")[0]]
|
||||
instantiated_return_type = instantiate_return_type(
|
||||
return_type,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename="ExampleClass")
|
||||
|
||||
self.assertEqual(instantiated_return_type.type1.get_typename(),
|
||||
"double")
|
||||
|
||||
return_type = ReturnType.rule.parseString("pair<T, U>")[0]
|
||||
template_typenames = ['T', 'U']
|
||||
instantiations = [
|
||||
Typename.rule.parseString("double")[0],
|
||||
Typename.rule.parseString("char")[0],
|
||||
]
|
||||
instantiated_return_type = instantiate_return_type(
|
||||
return_type,
|
||||
template_typenames,
|
||||
instantiations,
|
||||
cpp_typename="ExampleClass")
|
||||
|
||||
self.assertEqual(instantiated_return_type.type1.get_typename(),
|
||||
"double")
|
||||
self.assertEqual(instantiated_return_type.type2.get_typename(), "char")
|
||||
|
||||
def test_instantiate_name(self):
|
||||
"""Test for instantiate_name."""
|
||||
instantiations = [Typename.rule.parseString("Man")[0]]
|
||||
instantiated_name = instantiate_name("Iron", instantiations)
|
||||
self.assertEqual(instantiated_name, "IronMan")
|
||||
|
||||
def test_instantiate_namespace(self):
|
||||
"""Test for instantiate_namespace."""
|
||||
namespace = Namespace.rule.parseString("""
|
||||
namespace gtsam {
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
template<T={gtsam::Basis}>
|
||||
class Values {
|
||||
Values(const T& other);
|
||||
|
||||
template<V={Vector, Matrix}>
|
||||
void insert(size_t j, V x);
|
||||
|
||||
template<S={double}>
|
||||
static S staticMethod(const S& s);
|
||||
};
|
||||
}
|
||||
""")[0]
|
||||
instantiated_namespace = instantiate_namespace(namespace)
|
||||
|
||||
self.assertEqual(instantiated_namespace.name, "gtsam")
|
||||
self.assertEqual(instantiated_namespace.parent, "")
|
||||
|
||||
self.assertEqual(instantiated_namespace.content[0].header,
|
||||
"gtsam/nonlinear/Values.h")
|
||||
self.assertIsInstance(instantiated_namespace.content[0], Include)
|
||||
|
||||
self.assertEqual(instantiated_namespace.content[1].name, "ValuesBasis")
|
||||
self.assertIsInstance(instantiated_namespace.content[1], Class)
|
||||
|
||||
self.assertIsInstance(instantiated_namespace.content[1].ctors[0],
|
||||
Constructor)
|
||||
self.assertEqual(instantiated_namespace.content[1].ctors[0].name,
|
||||
"ValuesBasis")
|
||||
|
||||
self.assertIsInstance(instantiated_namespace.content[1].methods[0],
|
||||
Method)
|
||||
self.assertIsInstance(instantiated_namespace.content[1].methods[1],
|
||||
Method)
|
||||
self.assertEqual(instantiated_namespace.content[1].methods[0].name,
|
||||
"insertVector")
|
||||
self.assertEqual(instantiated_namespace.content[1].methods[1].name,
|
||||
"insertMatrix")
|
||||
|
||||
self.assertIsInstance(
|
||||
instantiated_namespace.content[1].static_methods[0], StaticMethod)
|
||||
self.assertEqual(
|
||||
instantiated_namespace.content[1].static_methods[0].name,
|
||||
"staticMethodDouble")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()