Merge branch 'develop' into feature/sphericalCamera

# Conflicts:
#	gtsam/geometry/CameraSet.h
#	gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h
#	gtsam_unstable/slam/tests/testSmartProjectionPoseFactorRollingShutter.cpp
release/4.3a0
lcarlone 2021-11-07 12:02:33 -05:00
parent 02c7d86dfc
commit e51d10f18c
150 changed files with 11522 additions and 2587 deletions

View File

@ -85,4 +85,4 @@ make -j2 install
cd $GITHUB_WORKSPACE/build/python cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix= $PYTHON setup.py install --user --prefix=
cd $GITHUB_WORKSPACE/python/gtsam/tests cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover $PYTHON -m unittest discover -v

View File

@ -68,6 +68,8 @@ function configure()
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \ -DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
-DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \ -DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
-DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
-DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DBOOST_ROOT=$BOOST_ROOT \ -DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \ -DBoost_NO_SYSTEM_PATHS=ON \
@ -111,9 +113,9 @@ function test ()
# Actual testing # Actual testing
if [ "$(uname)" == "Linux" ]; then if [ "$(uname)" == "Linux" ]; then
make -j$(nproc) make -j$(nproc) check
elif [ "$(uname)" == "Darwin" ]; then elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu) make -j$(sysctl -n hw.physicalcpu) check
fi fi
finish finish

View File

@ -55,6 +55,12 @@ jobs:
version: "9" version: "9"
flag: cayley flag: cayley
- name: ubuntu-system-libs
os: ubuntu-18.04
compiler: gcc
version: "9"
flag: system-libs
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v2
@ -126,6 +132,12 @@ jobs:
echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV
echo "GTSAM Uses Cayley map for Rot3" echo "GTSAM Uses Cayley map for Rot3"
- name: Use system versions of 3rd party libraries
if: matrix.flag == 'system'
run: |
echo "GTSAM_USE_SYSTEM_EIGEN=ON" >> $GITHUB_ENV
echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV
- name: Build & Test - name: Build & Test
run: | run: |
bash .github/scripts/unix.sh -t bash .github/scripts/unix.sh -t

View File

@ -38,11 +38,14 @@ if(${GTSAM_SOURCE_DIR} STREQUAL ${GTSAM_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ") message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif() endif()
include(cmake/HandleGeneralOptions.cmake) # CMake build options
# Libraries:
include(cmake/HandleBoost.cmake) # Boost include(cmake/HandleBoost.cmake) # Boost
include(cmake/HandleCCache.cmake) # ccache include(cmake/HandleCCache.cmake) # ccache
include(cmake/HandleCPack.cmake) # CPack include(cmake/HandleCPack.cmake) # CPack
include(cmake/HandleEigen.cmake) # Eigen3 include(cmake/HandleEigen.cmake) # Eigen3
include(cmake/HandleGeneralOptions.cmake) # CMake build options include(cmake/HandleMetis.cmake) # metis
include(cmake/HandleMKL.cmake) # MKL include(cmake/HandleMKL.cmake) # MKL
include(cmake/HandleOpenMP.cmake) # OpenMP include(cmake/HandleOpenMP.cmake) # OpenMP
include(cmake/HandlePerfTools.cmake) # Google perftools include(cmake/HandlePerfTools.cmake) # Google perftools
@ -102,6 +105,11 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)
if (GTSAM_BUILD_UNSTABLE)
GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake)
endif()
# Check for doxygen availability - optional dependency # Check for doxygen availability - optional dependency
find_package(Doxygen) find_package(Doxygen)

View File

@ -13,7 +13,8 @@ $ make install
## Important Installation Notes ## Important Installation Notes
1. GTSAM requires the following libraries to be installed on your system: 1. GTSAM requires the following libraries to be installed on your system:
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts) - BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes).
- Cmake version 3.0 or higher - Cmake version 3.0 or higher
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
@ -66,11 +67,15 @@ execute commands as follows for an out-of-source build:
This will build the library and unit tests, run all of the unit tests, This will build the library and unit tests, run all of the unit tests,
and then install the library itself. and then install the library itself.
## Boost Notes
Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
This is particularly seen when using `clang` as the C++ compiler.
For this reason we require Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
## Known Issues ## Known Issues
- When using `GTSAM_BUILD_WITH_MARCH_NATIVE=ON`, you may encounter issues in running tests which we are still investigating:
- Use of a version of GCC < 7.5 results in an "Indeterminant Linear System" error for `testSmartProjectionFactor`.
- Use of Boost version < 1.65 with clang will give a segfault for mulitple test cases.
- MSVC 2013 is not yet supported because it cannot build the serialization module of Boost 1.55 (or earlier). - MSVC 2013 is not yet supported because it cannot build the serialization module of Boost 1.55 (or earlier).
# Windows Installation # Windows Installation

View File

@ -144,7 +144,8 @@ if(NOT TBB_FOUND)
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin") elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
# OS X # OS X
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb") set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb"
"/usr/local/opt/tbb")
# TODO: Check to see which C++ library is being used by the compiler. # TODO: Check to see which C++ library is being used by the compiler.
if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0) if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)
@ -181,7 +182,18 @@ if(NOT TBB_FOUND)
################################## ##################################
if(TBB_INCLUDE_DIRS) if(TBB_INCLUDE_DIRS)
file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file) set(_tbb_version_file_prior_to_tbb_2021_1 "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h")
set(_tbb_version_file_after_tbb_2021_1 "${TBB_INCLUDE_DIRS}/oneapi/tbb/version.h")
if (EXISTS "${_tbb_version_file_prior_to_tbb_2021_1}")
file(READ "${_tbb_version_file_prior_to_tbb_2021_1}" _tbb_version_file )
elseif (EXISTS "${_tbb_version_file_after_tbb_2021_1}")
file(READ "${_tbb_version_file_after_tbb_2021_1}" _tbb_version_file )
else()
message(FATAL_ERROR "Found TBB installation: ${TBB_INCLUDE_DIRS} "
"missing version header.")
endif()
string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
TBB_VERSION_MAJOR "${_tbb_version_file}") TBB_VERSION_MAJOR "${_tbb_version_file}")
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"

View File

@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
# here. # here.
if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING) if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING}) set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING})
endif() endif()
# Version file # Version file

View File

@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif() endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
if(NOT MSVC AND NOT XCODE_VERSION) if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif() endif()

44
cmake/HandleMetis.cmake Normal file
View File

@ -0,0 +1,44 @@
###############################################################################
# Metis library
# For both system or bundle version, a cmake target "metis-gtsam-if" is defined (interface library)
# Dont try to use metis if GTSAM_SUPPORT_NESTED_DISSECTION is disabled:
if (NOT GTSAM_SUPPORT_NESTED_DISSECTION)
return()
endif()
option(GTSAM_USE_SYSTEM_METIS "Find and use system-installed libmetis. If 'off', use the one bundled with GTSAM" OFF)
if(GTSAM_USE_SYSTEM_METIS)
# Debian package: libmetis-dev
find_path(METIS_INCLUDE_DIR metis.h REQUIRED)
find_library(METIS_LIBRARY metis REQUIRED)
if(METIS_INCLUDE_DIR AND METIS_LIBRARY)
mark_as_advanced(METIS_INCLUDE_DIR)
mark_as_advanced(METIS_LIBRARY)
add_library(metis-gtsam-if INTERFACE)
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR})
target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
endif()
else()
# Bundled version:
option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF)
add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)
target_include_directories(metis-gtsam BEFORE PUBLIC
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
)
add_library(metis-gtsam-if INTERFACE)
target_link_libraries(metis-gtsam-if INTERFACE metis-gtsam)
endif()
list(APPEND GTSAM_EXPORTED_TARGETS metis-gtsam-if)
install(TARGETS metis-gtsam-if EXPORT GTSAM-exports ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})

View File

@ -32,6 +32,7 @@ endif()
print_build_options_for_target(gtsam) print_build_options_for_target(gtsam)
print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
if(GTSAM_USE_TBB) if(GTSAM_USE_TBB)
print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})") print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")

View File

@ -1,24 +1,32 @@
############################################################################### ###############################################################################
# Find TBB if (GTSAM_WITH_TBB)
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) # Find TBB
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
# Set up variables if we're using TBB # Set up variables if we're using TBB
if(TBB_FOUND AND GTSAM_WITH_TBB) if(TBB_FOUND)
set(GTSAM_USE_TBB 1) # This will go into config.h set(GTSAM_USE_TBB 1) # This will go into config.h
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1) # if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
# endif()
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
else()
set(TBB_GREATER_EQUAL_2020 0)
endif()
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
else() else()
set(TBB_GREATER_EQUAL_2020 0) set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
# Prohibit Timing build mode in combination with TBB
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
endif() endif()
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
else()
set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
# Prohibit Timing build mode in combination with TBB
if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
endif() endif()

View File

@ -22,18 +22,19 @@ if (GTSAM_BUILD_DOCS)
# GTSAM core subfolders # GTSAM core subfolders
set(gtsam_doc_subdirs set(gtsam_doc_subdirs
gtsam/base gtsam/base
gtsam/discrete gtsam/basis
gtsam/geometry gtsam/discrete
gtsam/inference gtsam/geometry
gtsam/linear gtsam/inference
gtsam/navigation gtsam/linear
gtsam/nonlinear gtsam/navigation
gtsam/sam gtsam/nonlinear
gtsam/sfm gtsam/sam
gtsam/slam gtsam/sfm
gtsam/smart gtsam/slam
gtsam/symbolic gtsam/smart
gtsam/symbolic
gtsam gtsam
) )

View File

@ -5082,6 +5082,394 @@ reference "ex:projection"
\end_inset \end_inset
\end_layout
\begin_layout Subsection
Derivative of Adjoint
\begin_inset CommandInset label
LatexCommand label
name "subsec:pose3_adjoint_deriv"
\end_inset
\end_layout
\begin_layout Standard
Consider
\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$
\end_inset
is defined as
\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$
\end_inset
.
The derivative is notated (see Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"
\end_inset
):
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
\]
\end_inset
First, computing
\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\xi_{b})$
\end_inset
is easy, as its matrix is simply
\begin_inset Formula $Ad_{T}$
\end_inset
:
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
\]
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
\]
\end_inset
We will derive
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
\end_inset
using two approaches.
In the first, we'll define
\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$
\end_inset
.
From Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"
\end_inset
,
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
\end{align*}
\end_inset
Now we can use the definition of the Adjoint representation
\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$
\end_inset
(aka conjugation by
\begin_inset Formula $g$
\end_inset
) then apply product rule and simplify:
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
& =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
& =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
& =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
& =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
& =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
D_{1}F_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
\end{align*}
\end_inset
Where
\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$
\end_inset
is the adjoint map of the lie algebra.
\end_layout
\begin_layout Standard
The second, perhaps more intuitive way of deriving
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi_{b})$
\end_inset
, would be to use the fact that the derivative at the origin
\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$
\end_inset
by definition of the adjoint
\begin_inset Formula $ad_{\xi}$
\end_inset
.
Then applying the property
\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$
\end_inset
,
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
\]
\end_inset
\end_layout
\begin_layout Subsection
Derivative of AdjointTranspose
\end_layout
\begin_layout Standard
The transpose of the Adjoint,
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g^{*}\rightarrow g^{*}}$
\end_inset
, is useful as a way to change the reference frame of vectors in the dual
space
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
(note the
\begin_inset Formula $^{*}$
\end_inset
denoting that we are now in the dual space)
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
.
To be more concrete, where
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
as
\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$
\end_inset
converts the
\emph on
twist
\emph default
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $\xi_{b}$
\end_inset
from the
\begin_inset Formula $T$
\end_inset
frame,
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset
converts the
\family default
\series default
\shape default
\size default
\emph on
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
wrench
\emph default
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $\xi_{b}^{*}$
\end_inset
from the
\begin_inset Formula $T$
\end_inset
frame
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
.
It's difficult to apply a similar derivation as in Section
\begin_inset CommandInset ref
LatexCommand ref
reference "subsec:pose3_adjoint_deriv"
plural "false"
caps "false"
noprefix "false"
\end_inset
for the derivative of
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset
because
\begin_inset Formula $Ad_{T}^{T}$
\end_inset
cannot be naturally defined as a conjugation, so we resort to crunching
through the algebra.
The details are omitted but the result is a form that vaguely resembles
(but does not exactly match)
\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$
\end_inset
:
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
\begin{bmatrix}\omega_{T}\\
v_{T}
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
\hat{v}_{T} & 0
\end{bmatrix}
\end{align*}
\end_inset
\end_layout \end_layout
\begin_layout Subsection \begin_layout Subsection

Binary file not shown.

View File

@ -1,6 +1,57 @@
# Instructions # Instructions
Build all docker images, in order: # Images on Docker Hub
There are 4 images available on https://hub.docker.com/orgs/borglab/repositories:
- `borglab/ubuntu-boost-tbb`: 18.06 Linux (nicknamed `bionic`) base image, with Boost and TBB installed.
- `borglab/ubuntu-gtsam`: GTSAM Release version installed in `/usr/local`.
- `borglab/ubuntu-gtsam-python`: installed GTSAM with python wrapper.
- `borglab/ubuntu-gtsam-python-vnc`: image with GTSAM+python wrapper that will run a VNC server to connect to.
# Using the images
## Just GTSAM
To start the Docker image, execute
```bash
docker run -it borglab/ubuntu-gtsam:bionic
```
after you will find yourself in a bash shell, in the directory `/usr/src/gtsam/build`.
## GTSAM with Python wrapper
To use GTSAM via the python wrapper, similarly execute
```bash
docker run -it borglab/ubuntu-gtsam-python:bionic
```
and then launch `python3`:
```bash
python3
>>> import gtsam
>>> gtsam.Pose2(1,2,3)
(1, 2, 3)
```
## GTSAM with Python wrapper and VNC
First, start the docker image, which will run a VNC server on port 5900:
```bash
docker run -p 5900:5900 borglab/ubuntu-gtsam-python-vnc:bionic
```
Then open a remote VNC X client, for example:
### Linux
```bash
sudo apt-get install tigervnc-viewer
xtigervncviewer :5900
```
### Mac
The Finder's "Connect to Server..." with `vnc://127.0.0.1` does not work, for some reason. Using the free [VNC Viewer](https://www.realvnc.com/en/connect/download/viewer/), enter `0.0.0.0:5900` as the server.
# Re-building the images locally
To build all docker images, in order:
```bash ```bash
(cd ubuntu-boost-tbb && ./build.sh) (cd ubuntu-boost-tbb && ./build.sh)
@ -9,13 +60,4 @@ Build all docker images, in order:
(cd ubuntu-gtsam-python-vnc && ./build.sh) (cd ubuntu-gtsam-python-vnc && ./build.sh)
``` ```
Then launch with: Note: building GTSAM can take a lot of memory because of the heavy templating. It is advisable to give Docker enough resources, e.g., 8GB, to avoid OOM errors while compiling.
docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
Then open a remote VNC X client, for example:
sudo apt-get install tigervnc-viewer
xtigervncviewer :5900

View File

@ -1,3 +1,3 @@
# Build command for Docker image # Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake # TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic . docker build --no-cache -t borglab/ubuntu-boost-tbb:bionic .

View File

@ -1,7 +1,7 @@
# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction. # This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
# Get the base Ubuntu/GTSAM image from Docker Hub # Get the base Ubuntu/GTSAM image from Docker Hub
FROM dellaert/ubuntu-gtsam-python:bionic FROM borglab/ubuntu-gtsam-python:bionic
# Things needed to get a python GUI # Things needed to get a python GUI
ENV DEBIAN_FRONTEND noninteractive ENV DEBIAN_FRONTEND noninteractive

View File

@ -1,4 +1,4 @@
# Build command for Docker image # Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake # TODO(dellaert): use docker compose and/or cmake
# Needs to be run in docker/ubuntu-gtsam-python-vnc directory # Needs to be run in docker/ubuntu-gtsam-python-vnc directory
docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic . docker build -t borglab/ubuntu-gtsam-python-vnc:bionic .

View File

@ -2,4 +2,4 @@
docker run -it \ docker run -it \
--workdir="/usr/src/gtsam" \ --workdir="/usr/src/gtsam" \
-p 5900:5900 \ -p 5900:5900 \
dellaert/ubuntu-gtsam-python-vnc:bionic borglab/ubuntu-gtsam-python-vnc:bionic

View File

@ -1,7 +1,7 @@
# GTSAM Ubuntu image with Python wrapper support. # GTSAM Ubuntu image with Python wrapper support.
# Get the base Ubuntu/GTSAM image from Docker Hub # Get the base Ubuntu/GTSAM image from Docker Hub
FROM dellaert/ubuntu-gtsam:bionic FROM borglab/ubuntu-gtsam:bionic
# Install pip # Install pip
RUN apt-get install -y python3-pip python3-dev RUN apt-get install -y python3-pip python3-dev
@ -22,7 +22,9 @@ RUN cmake \
.. ..
# Build again, as ubuntu-gtsam image cleaned # Build again, as ubuntu-gtsam image cleaned
RUN make -j4 install && make clean RUN make -j4 install
RUN make python-install
RUN make clean
# Needed to run python wrapper: # Needed to run python wrapper:
RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc

View File

@ -1,3 +1,3 @@
# Build command for Docker image # Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake # TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic . docker build --no-cache -t borglab/ubuntu-gtsam-python:bionic .

View File

@ -1,7 +1,7 @@
# Ubuntu image with GTSAM installed. Configured with Boost and TBB support. # Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
# Get the base Ubuntu image from Docker Hub # Get the base Ubuntu image from Docker Hub
FROM dellaert/ubuntu-boost-tbb:bionic FROM borglab/ubuntu-boost-tbb:bionic
# Install git # Install git
RUN apt-get update && \ RUN apt-get update && \

View File

@ -1,3 +1,3 @@
# Build command for Docker image # Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake # TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam:bionic . docker build --no-cache -t borglab/ubuntu-gtsam:bionic .

View File

@ -11,21 +11,23 @@
/** /**
* @file IMUKittiExampleGPS * @file IMUKittiExampleGPS
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics * VISION BENCHMARK SUITE
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ
* Electronics
*/ */
// GTSAM related includes. // GTSAM related includes.
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/CombinedImuFactor.h> #include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/GPSFactor.h> #include <gtsam/navigation/GPSFactor.h>
#include <gtsam/navigation/ImuFactor.h> #include <gtsam/navigation/ImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/nonlinear/ISAM2.h> #include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/ISAM2Params.h> #include <gtsam/nonlinear/ISAM2Params.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h> #include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>
#include <cstring> #include <cstring>
#include <fstream> #include <fstream>
@ -34,35 +36,35 @@
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
struct KittiCalibration { struct KittiCalibration {
double body_ptx; double body_ptx;
double body_pty; double body_pty;
double body_ptz; double body_ptz;
double body_prx; double body_prx;
double body_pry; double body_pry;
double body_prz; double body_prz;
double accelerometer_sigma; double accelerometer_sigma;
double gyroscope_sigma; double gyroscope_sigma;
double integration_sigma; double integration_sigma;
double accelerometer_bias_sigma; double accelerometer_bias_sigma;
double gyroscope_bias_sigma; double gyroscope_bias_sigma;
double average_delta_t; double average_delta_t;
}; };
struct ImuMeasurement { struct ImuMeasurement {
double time; double time;
double dt; double dt;
Vector3 accelerometer; Vector3 accelerometer;
Vector3 gyroscope; // omega Vector3 gyroscope; // omega
}; };
struct GpsMeasurement { struct GpsMeasurement {
double time; double time;
Vector3 position; // x,y,z Vector3 position; // x,y,z
}; };
const string output_filename = "IMUKittiExampleGPSResults.csv"; const string output_filename = "IMUKittiExampleGPSResults.csv";
@ -70,290 +72,313 @@ const string output_filename = "IMUKittiExampleGPSResults.csv";
void loadKittiData(KittiCalibration& kitti_calibration, void loadKittiData(KittiCalibration& kitti_calibration,
vector<ImuMeasurement>& imu_measurements, vector<ImuMeasurement>& imu_measurements,
vector<GpsMeasurement>& gps_measurements) { vector<GpsMeasurement>& gps_measurements) {
string line; string line;
// Read IMU metadata and compute relative sensor pose transforms // Read IMU metadata and compute relative sensor pose transforms
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT // GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt"); // AverageDeltaT
ifstream imu_metadata(imu_metadata_file.c_str()); string imu_metadata_file =
findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
ifstream imu_metadata(imu_metadata_file.c_str());
printf("-- Reading sensor metadata\n"); printf("-- Reading sensor metadata\n");
getline(imu_metadata, line, '\n'); // ignore the first line getline(imu_metadata, line, '\n'); // ignore the first line
// Load Kitti calibration // Load Kitti calibration
getline(imu_metadata, line, '\n'); getline(imu_metadata, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
&kitti_calibration.body_ptx, &kitti_calibration.body_ptx, &kitti_calibration.body_pty,
&kitti_calibration.body_pty, &kitti_calibration.body_ptz, &kitti_calibration.body_prx,
&kitti_calibration.body_ptz, &kitti_calibration.body_pry, &kitti_calibration.body_prz,
&kitti_calibration.body_prx, &kitti_calibration.accelerometer_sigma,
&kitti_calibration.body_pry, &kitti_calibration.gyroscope_sigma,
&kitti_calibration.body_prz, &kitti_calibration.integration_sigma,
&kitti_calibration.accelerometer_sigma, &kitti_calibration.accelerometer_bias_sigma,
&kitti_calibration.gyroscope_sigma, &kitti_calibration.gyroscope_bias_sigma,
&kitti_calibration.integration_sigma, &kitti_calibration.average_delta_t);
&kitti_calibration.accelerometer_bias_sigma, printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
&kitti_calibration.gyroscope_bias_sigma, kitti_calibration.body_ptx, kitti_calibration.body_pty,
&kitti_calibration.average_delta_t); kitti_calibration.body_ptz, kitti_calibration.body_prx,
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n", kitti_calibration.body_pry, kitti_calibration.body_prz,
kitti_calibration.body_ptx, kitti_calibration.accelerometer_sigma,
kitti_calibration.body_pty, kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma,
kitti_calibration.body_ptz, kitti_calibration.accelerometer_bias_sigma,
kitti_calibration.body_prx, kitti_calibration.gyroscope_bias_sigma,
kitti_calibration.body_pry, kitti_calibration.average_delta_t);
kitti_calibration.body_prz,
kitti_calibration.accelerometer_sigma,
kitti_calibration.gyroscope_sigma,
kitti_calibration.integration_sigma,
kitti_calibration.accelerometer_bias_sigma,
kitti_calibration.gyroscope_bias_sigma,
kitti_calibration.average_delta_t);
// Read IMU data // Read IMU data
// Time dt accelX accelY accelZ omegaX omegaY omegaZ // Time dt accelX accelY accelZ omegaX omegaY omegaZ
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt"); string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
printf("-- Reading IMU measurements from file\n"); printf("-- Reading IMU measurements from file\n");
{ {
ifstream imu_data(imu_data_file.c_str()); ifstream imu_data(imu_data_file.c_str());
getline(imu_data, line, '\n'); // ignore the first line getline(imu_data, line, '\n'); // ignore the first line
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0; double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0,
while (!imu_data.eof()) { gyro_y = 0, gyro_z = 0;
getline(imu_data, line, '\n'); while (!imu_data.eof()) {
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", getline(imu_data, line, '\n');
&time, &dt, sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt,
&acc_x, &acc_y, &acc_z, &acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z);
&gyro_x, &gyro_y, &gyro_z);
ImuMeasurement measurement; ImuMeasurement measurement;
measurement.time = time; measurement.time = time;
measurement.dt = dt; measurement.dt = dt;
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z); measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z); measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
imu_measurements.push_back(measurement); imu_measurements.push_back(measurement);
}
} }
}
// Read GPS data // Read GPS data
// Time,X,Y,Z // Time,X,Y,Z
string gps_data_file = findExampleDataFile("KittiGps_converted.txt"); string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
printf("-- Reading GPS measurements from file\n"); printf("-- Reading GPS measurements from file\n");
{ {
ifstream gps_data(gps_data_file.c_str()); ifstream gps_data(gps_data_file.c_str());
getline(gps_data, line, '\n'); // ignore the first line getline(gps_data, line, '\n'); // ignore the first line
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0; double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
while (!gps_data.eof()) { while (!gps_data.eof()) {
getline(gps_data, line, '\n'); getline(gps_data, line, '\n');
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z); sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
GpsMeasurement measurement; GpsMeasurement measurement;
measurement.time = time; measurement.time = time;
measurement.position = Vector3(gps_x, gps_y, gps_z); measurement.position = Vector3(gps_x, gps_y, gps_z);
gps_measurements.push_back(measurement); gps_measurements.push_back(measurement);
}
} }
}
} }
int main(int argc, char* argv[]) { int main(int argc, char* argv[]) {
KittiCalibration kitti_calibration; KittiCalibration kitti_calibration;
vector<ImuMeasurement> imu_measurements; vector<ImuMeasurement> imu_measurements;
vector<GpsMeasurement> gps_measurements; vector<GpsMeasurement> gps_measurements;
loadKittiData(kitti_calibration, imu_measurements, gps_measurements); loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz, Vector6 BodyP =
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz) (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty,
.finished(); kitti_calibration.body_ptz, kitti_calibration.body_prx,
auto body_T_imu = Pose3::Expmap(BodyP); kitti_calibration.body_pry, kitti_calibration.body_prz)
if (!body_T_imu.equals(Pose3(), 1e-5)) { .finished();
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"); auto body_T_imu = Pose3::Expmap(BodyP);
exit(-1); if (!body_T_imu.equals(Pose3(), 1e-5)) {
} printf(
"Currently only support IMUinBody is identity, i.e. IMU and body frame "
"are the same");
exit(-1);
}
// Configure different variables // Configure different variables
// double t_offset = gps_measurements[0].time; // double t_offset = gps_measurements[0].time;
size_t first_gps_pose = 1; size_t first_gps_pose = 1;
size_t gps_skip = 10; // Skip this many GPS measurements each time size_t gps_skip = 10; // Skip this many GPS measurements each time
double g = 9.8; double g = 9.8;
auto w_coriolis = Vector3::Zero(); // zero vector auto w_coriolis = Vector3::Zero(); // zero vector
// Configure noise models // Configure noise models
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), auto noise_model_gps = noiseModel::Diagonal::Precisions(
Vector3::Constant(1.0/0.07)) (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07))
.finished()); .finished());
// Set initial conditions for the estimated trajectory // Set initial conditions for the estimated trajectory
// initial pose is the reference frame (navigation frame) // initial pose is the reference frame (navigation frame)
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position); auto current_pose_global =
// the vehicle is stationary at the beginning at position 0,0,0 Pose3(Rot3(), gps_measurements[first_gps_pose].position);
Vector3 current_velocity_global = Vector3::Zero(); // the vehicle is stationary at the beginning at position 0,0,0
auto current_bias = imuBias::ConstantBias(); // init with zero bias Vector3 current_velocity_global = Vector3::Zero();
auto current_bias = imuBias::ConstantBias(); // init with zero bias
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), auto sigma_init_x = noiseModel::Diagonal::Precisions(
Vector3::Constant(1.0)) (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished());
.finished()); auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0)); auto sigma_init_b = noiseModel::Diagonal::Sigmas(
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100), (Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05))
Vector3::Constant(5.00e-05)) .finished());
.finished());
// Set IMU preintegration parameters // Set IMU preintegration parameters
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2); Matrix33 measured_acc_cov =
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2); I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
// error committed in integrating position from velocities Matrix33 measured_omega_cov =
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2); I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
// error committed in integrating position from velocities
Matrix33 integration_error_cov =
I_3x3 * pow(kitti_calibration.integration_sigma, 2);
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g); auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous imu_params->accelerometerCovariance =
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous measured_acc_cov; // acc white noise in continuous
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous imu_params->integrationCovariance =
imu_params->omegaCoriolis = w_coriolis; integration_error_cov; // integration uncertainty continuous
imu_params->gyroscopeCovariance =
measured_omega_cov; // gyro white noise in continuous
imu_params->omegaCoriolis = w_coriolis;
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr; std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement =
nullptr;
// Set ISAM2 parameters and create ISAM2 solver object // Set ISAM2 parameters and create ISAM2 solver object
ISAM2Params isam_params; ISAM2Params isam_params;
isam_params.factorization = ISAM2Params::CHOLESKY; isam_params.factorization = ISAM2Params::CHOLESKY;
isam_params.relinearizeSkip = 10; isam_params.relinearizeSkip = 10;
ISAM2 isam(isam_params); ISAM2 isam(isam_params);
// Create the factor graph and values object that will store new factors and values to add to the incremental graph // Create the factor graph and values object that will store new factors and
NonlinearFactorGraph new_factors; // values to add to the incremental graph
Values new_values; // values storing the initial estimates of new nodes in the factor graph NonlinearFactorGraph new_factors;
Values new_values; // values storing the initial estimates of new nodes in
// the factor graph
/// Main loop: /// Main loop:
/// (1) we read the measurements /// (1) we read the measurements
/// (2) we create the corresponding factors in the graph /// (2) we create the corresponding factors in the graph
/// (3) we solve the graph to obtain and optimal estimate of robot trajectory /// (3) we solve the graph to obtain and optimal estimate of robot trajectory
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n"); printf(
size_t j = 0; "-- Starting main loop: inference is performed at each time step, but we "
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { "plot trajectory every 10 steps\n");
// At each non=IMU measurement we initialize a new node in the graph size_t j = 0;
auto current_pose_key = X(i); size_t included_imu_measurement_count = 0;
auto current_vel_key = V(i);
auto current_bias_key = B(i);
double t = gps_measurements[i].time;
if (i == first_gps_pose) { for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
// Create initial estimate and prior on initial pose, velocity, and biases // At each non=IMU measurement we initialize a new node in the graph
new_values.insert(current_pose_key, current_pose_global); auto current_pose_key = X(i);
new_values.insert(current_vel_key, current_velocity_global); auto current_vel_key = V(i);
new_values.insert(current_bias_key, current_bias); auto current_bias_key = B(i);
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x); double t = gps_measurements[i].time;
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
} else {
double t_previous = gps_measurements[i-1].time;
// Summarize IMU data between the previous GPS measurement and now if (i == first_gps_pose) {
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias); // Create initial estimate and prior on initial pose, velocity, and biases
static size_t included_imu_measurement_count = 0; new_values.insert(current_pose_key, current_pose_global);
while (j < imu_measurements.size() && imu_measurements[j].time <= t) { new_values.insert(current_vel_key, current_velocity_global);
if (imu_measurements[j].time >= t_previous) { new_values.insert(current_bias_key, current_bias);
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer, new_factors.emplace_shared<PriorFactor<Pose3>>(
imu_measurements[j].gyroscope, current_pose_key, current_pose_global, sigma_init_x);
imu_measurements[j].dt); new_factors.emplace_shared<PriorFactor<Vector3>>(
included_imu_measurement_count++; current_vel_key, current_velocity_global, sigma_init_v);
} new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(
j++; current_bias_key, current_bias, sigma_init_b);
} } else {
double t_previous = gps_measurements[i - 1].time;
// Create IMU factor // Summarize IMU data between the previous GPS measurement and now
auto previous_pose_key = X(i-1); current_summarized_measurement =
auto previous_vel_key = V(i-1); std::make_shared<PreintegratedImuMeasurements>(imu_params,
auto previous_bias_key = B(i-1); current_bias);
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key, while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
current_pose_key, current_vel_key, if (imu_measurements[j].time >= t_previous) {
previous_bias_key, *current_summarized_measurement); current_summarized_measurement->integrateMeasurement(
imu_measurements[j].accelerometer, imu_measurements[j].gyroscope,
// Bias evolution as given in the IMU metadata imu_measurements[j].dt);
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() << included_imu_measurement_count++;
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
.finished());
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
current_bias_key,
imuBias::ConstantBias(),
sigma_between_b);
// Create GPS factor
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
if ((i % gps_skip) == 0) {
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
new_values.insert(current_pose_key, gps_pose);
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
cout << gps_pose.translation();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);
}
// Add initial values for velocity and bias based on the previous estimates
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
// Update solver
// =======================================================================
// We accumulate 2*GPSskip GPS measurements before updating the solver at
// first so that the heading becomes observable.
if (i > (first_gps_pose + 2*gps_skip)) {
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
new_factors.print();
isam.update(new_factors, new_values);
// Reset the newFactors and newValues list
new_factors.resize(0);
new_values.clear();
// Extract the result/current estimates
Values result = isam.calculateEstimate();
current_pose_global = result.at<Pose3>(current_pose_key);
current_velocity_global = result.at<Vector3>(current_vel_key);
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
printf("\n################ POSE AT TIME %lf ################\n", t);
current_pose_global.print();
printf("\n\n");
}
} }
j++;
}
// Create IMU factor
auto previous_pose_key = X(i - 1);
auto previous_vel_key = V(i - 1);
auto previous_bias_key = B(i - 1);
new_factors.emplace_shared<ImuFactor>(
previous_pose_key, previous_vel_key, current_pose_key,
current_vel_key, previous_bias_key, *current_summarized_measurement);
// Bias evolution as given in the IMU metadata
auto sigma_between_b = noiseModel::Diagonal::Sigmas(
(Vector6() << Vector3::Constant(
sqrt(included_imu_measurement_count) *
kitti_calibration.accelerometer_bias_sigma),
Vector3::Constant(sqrt(included_imu_measurement_count) *
kitti_calibration.gyroscope_bias_sigma))
.finished());
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(
previous_bias_key, current_bias_key, imuBias::ConstantBias(),
sigma_between_b);
// Create GPS factor
auto gps_pose =
Pose3(current_pose_global.rotation(), gps_measurements[i].position);
if ((i % gps_skip) == 0) {
new_factors.emplace_shared<PriorFactor<Pose3>>(
current_pose_key, gps_pose, noise_model_gps);
new_values.insert(current_pose_key, gps_pose);
printf("############ POSE INCLUDED AT TIME %.6lf ############\n",
t);
cout << gps_pose.translation();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);
}
// Add initial values for velocity and bias based on the previous
// estimates
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
// Update solver
// =======================================================================
// We accumulate 2*GPSskip GPS measurements before updating the solver at
// first so that the heading becomes observable.
if (i > (first_gps_pose + 2 * gps_skip)) {
printf("############ NEW FACTORS AT TIME %.6lf ############\n",
t);
new_factors.print();
isam.update(new_factors, new_values);
// Reset the newFactors and newValues list
new_factors.resize(0);
new_values.clear();
// Extract the result/current estimates
Values result = isam.calculateEstimate();
current_pose_global = result.at<Pose3>(current_pose_key);
current_velocity_global = result.at<Vector3>(current_vel_key);
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
printf("\n############ POSE AT TIME %lf ############\n", t);
current_pose_global.print();
printf("\n\n");
}
} }
}
// Save results to file // Save results to file
printf("\nWriting results to file...\n"); printf("\nWriting results to file...\n");
FILE* fp_out = fopen(output_filename.c_str(), "w+"); FILE* fp_out = fopen(output_filename.c_str(), "w+");
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n"); fprintf(fp_out,
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
Values result = isam.calculateEstimate(); Values result = isam.calculateEstimate();
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
auto pose_key = X(i); auto pose_key = X(i);
auto vel_key = V(i); auto vel_key = V(i);
auto bias_key = B(i); auto bias_key = B(i);
auto pose = result.at<Pose3>(pose_key); auto pose = result.at<Pose3>(pose_key);
auto velocity = result.at<Vector3>(vel_key); auto velocity = result.at<Vector3>(vel_key);
auto bias = result.at<imuBias::ConstantBias>(bias_key); auto bias = result.at<imuBias::ConstantBias>(bias_key);
auto pose_quat = pose.rotation().toQuaternion(); auto pose_quat = pose.rotation().toQuaternion();
auto gps = gps_measurements[i].position; auto gps = gps_measurements[i].position;
cout << "State at #" << i << endl; cout << "State at #" << i << endl;
cout << "Pose:" << endl << pose << endl; cout << "Pose:" << endl << pose << endl;
cout << "Velocity:" << endl << velocity << endl; cout << "Velocity:" << endl << velocity << endl;
cout << "Bias:" << endl << bias << endl; cout << "Bias:" << endl << bias << endl;
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
gps_measurements[i].time, gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
pose.x(), pose.y(), pose.z(), pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0),
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(1), gps(2));
gps(0), gps(1), gps(2)); }
}
fclose(fp_out); fclose(fp_out);
} }

View File

@ -49,10 +49,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)
endif() endif()
option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF) # metis: already handled in ROOT/cmake/HandleMetis.cmake
if(GTSAM_SUPPORT_NESTED_DISSECTION)
add_subdirectory(metis)
endif()
add_subdirectory(ceres) add_subdirectory(ceres)

View File

@ -5,6 +5,7 @@ project(gtsam LANGUAGES CXX)
# The following variable is the master list of subdirs to add # The following variable is the master list of subdirs to add
set (gtsam_subdirs set (gtsam_subdirs
base base
basis
geometry geometry
inference inference
symbolic symbolic
@ -88,7 +89,8 @@ list(APPEND gtsam_srcs "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/d
install(FILES "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/dllexport.h" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam) install(FILES "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/dllexport.h" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam)
if(GTSAM_SUPPORT_NESTED_DISSECTION) if(GTSAM_SUPPORT_NESTED_DISSECTION)
list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam) # target metis-gtsam-if is defined in both cases: embedded metis or system version:
list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam-if)
endif() endif()
# Versions # Versions
@ -154,16 +156,6 @@ target_include_directories(gtsam SYSTEM BEFORE PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD> $<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
) )
if(GTSAM_SUPPORT_NESTED_DISSECTION)
target_include_directories(gtsam BEFORE PUBLIC
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
)
endif()
if(WIN32) # Add 'lib' prefix to static library to avoid filename collision with shared library if(WIN32) # Add 'lib' prefix to static library to avoid filename collision with shared library
if (NOT BUILD_SHARED_LIBS) if (NOT BUILD_SHARED_LIBS)

View File

@ -320,28 +320,28 @@ T expm(const Vector& x, int K=7) {
} }
/** /**
* Linear interpolation between X and Y by coefficient t (typically t \in [0,1], * Linear interpolation between X and Y by coefficient t. Typically t \in [0,1],
* but can also be used to extrapolate before pose X or after pose Y), with optional jacobians. * but can also be used to extrapolate before pose X or after pose Y.
*/ */
template <typename T> template <typename T>
T interpolate(const T& X, const T& Y, double t, T interpolate(const T& X, const T& Y, double t,
typename MakeOptionalJacobian<T, T>::type Hx = boost::none, typename MakeOptionalJacobian<T, T>::type Hx = boost::none,
typename MakeOptionalJacobian<T, T>::type Hy = boost::none) { typename MakeOptionalJacobian<T, T>::type Hy = boost::none) {
if (Hx || Hy) { if (Hx || Hy) {
typename traits<T>::TangentVector log_Xinv_Y;
typename MakeJacobian<T, T>::type between_H_x, log_H, exp_H, compose_H_x; typename MakeJacobian<T, T>::type between_H_x, log_H, exp_H, compose_H_x;
const T between =
traits<T>::Between(X, Y, between_H_x); // between_H_y = identity
typename traits<T>::TangentVector delta = traits<T>::Logmap(between, log_H);
const T Delta = traits<T>::Expmap(t * delta, exp_H);
const T result = traits<T>::Compose(
X, Delta, compose_H_x); // compose_H_xinv_y = identity
T Xinv_Y = traits<T>::Between(X, Y, between_H_x); // between_H_y = identity if (Hx) *Hx = compose_H_x + t * exp_H * log_H * between_H_x;
log_Xinv_Y = traits<T>::Logmap(Xinv_Y, log_H); if (Hy) *Hy = t * exp_H * log_H;
Xinv_Y = traits<T>::Expmap(t * log_Xinv_Y, exp_H); return result;
Xinv_Y = traits<T>::Compose(X, Xinv_Y, compose_H_x); // compose_H_xinv_y = identity
if(Hx) *Hx = compose_H_x + t * exp_H * log_H * between_H_x;
if(Hy) *Hy = t * exp_H * log_H;
return Xinv_Y;
} }
return traits<T>::Compose(X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y)))); return traits<T>::Compose(
X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y))));
} }
/** /**

View File

@ -89,6 +89,13 @@ public:
usurp(dynamic.data()); usurp(dynamic.data());
} }
/// Constructor that will resize a dynamic matrix (unless already correct)
OptionalJacobian(Eigen::MatrixXd* dynamic) :
map_(nullptr) {
dynamic->resize(Rows, Cols); // no malloc if correct size
usurp(dynamic->data());
}
#ifndef OPTIONALJACOBIAN_NOBOOST #ifndef OPTIONALJACOBIAN_NOBOOST
/// Constructor with boost::none just makes empty /// Constructor with boost::none just makes empty

View File

@ -24,40 +24,33 @@ using namespace std;
using namespace gtsam; using namespace gtsam;
//****************************************************************************** //******************************************************************************
#define TEST_CONSTRUCTOR(DIM1, DIM2, X, TRUTHY) \
{ \
OptionalJacobian<DIM1, DIM2> H(X); \
EXPECT(H == TRUTHY); \
}
TEST( OptionalJacobian, Constructors ) { TEST( OptionalJacobian, Constructors ) {
Matrix23 fixed; Matrix23 fixed;
OptionalJacobian<2, 3> H1;
EXPECT(!H1);
OptionalJacobian<2, 3> H2(fixed);
EXPECT(H2);
OptionalJacobian<2, 3> H3(&fixed);
EXPECT(H3);
Matrix dynamic; Matrix dynamic;
OptionalJacobian<2, 3> H4(dynamic);
EXPECT(H4);
OptionalJacobian<2, 3> H5(boost::none);
EXPECT(!H5);
boost::optional<Matrix&> optional(dynamic); boost::optional<Matrix&> optional(dynamic);
OptionalJacobian<2, 3> H6(optional);
EXPECT(H6);
OptionalJacobian<2, 3> H;
EXPECT(!H);
TEST_CONSTRUCTOR(2, 3, fixed, true);
TEST_CONSTRUCTOR(2, 3, &fixed, true);
TEST_CONSTRUCTOR(2, 3, dynamic, true);
TEST_CONSTRUCTOR(2, 3, &dynamic, true);
TEST_CONSTRUCTOR(2, 3, boost::none, false);
TEST_CONSTRUCTOR(2, 3, optional, true);
// Test dynamic
OptionalJacobian<-1, -1> H7; OptionalJacobian<-1, -1> H7;
EXPECT(!H7); EXPECT(!H7);
OptionalJacobian<-1, -1> H8(dynamic); TEST_CONSTRUCTOR(-1, -1, dynamic, true);
EXPECT(H8); TEST_CONSTRUCTOR(-1, -1, boost::none, false);
TEST_CONSTRUCTOR(-1, -1, optional, true);
OptionalJacobian<-1, -1> H9(boost::none);
EXPECT(!H9);
OptionalJacobian<-1, -1> H10(optional);
EXPECT(H10);
} }
//****************************************************************************** //******************************************************************************
@ -101,6 +94,25 @@ TEST( OptionalJacobian, Fixed) {
dynamic2.setOnes(); dynamic2.setOnes();
test(dynamic2); test(dynamic2);
EXPECT(assert_equal(kTestMatrix, dynamic2)); EXPECT(assert_equal(kTestMatrix, dynamic2));
{ // Dynamic pointer
// Passing in an empty matrix means we want it resized
Matrix dynamic0;
test(&dynamic0);
EXPECT(assert_equal(kTestMatrix, dynamic0));
// Dynamic wrong size
Matrix dynamic1(3, 5);
dynamic1.setOnes();
test(&dynamic1);
EXPECT(assert_equal(kTestMatrix, dynamic1));
// Dynamic right size
Matrix dynamic2(2, 5);
dynamic2.setOnes();
test(&dynamic2);
EXPECT(assert_equal(kTestMatrix, dynamic2));
}
} }
//****************************************************************************** //******************************************************************************

View File

@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
// Typedefs // Typedefs
typedef typename FOREST::Node Node; typedef typename FOREST::Node Node;
tbb::task::spawn_root_and_wait( internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre, visitorPost, problemSizeThreshold);
visitorPost, problemSizeThreshold));
#else #else
DepthFirstForest(forest, rootData, visitorPre, visitorPost); DepthFirstForest(forest, rootData, visitorPre, visitorPost);
#endif #endif

View File

@ -22,7 +22,7 @@
#include <boost/make_shared.hpp> #include <boost/make_shared.hpp>
#ifdef GTSAM_USE_TBB #ifdef GTSAM_USE_TBB
#include <tbb/task.h> // tbb::task, tbb::task_list #include <tbb/task_group.h> // tbb::task_group
#include <tbb/scalable_allocator.h> // tbb::scalable_allocator #include <tbb/scalable_allocator.h> // tbb::scalable_allocator
namespace gtsam { namespace gtsam {
@ -34,7 +34,7 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST> template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
class PreOrderTask : public tbb::task class PreOrderTask
{ {
public: public:
const boost::shared_ptr<NODE>& treeNode; const boost::shared_ptr<NODE>& treeNode;
@ -42,28 +42,30 @@ namespace gtsam {
VISITOR_PRE& visitorPre; VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost; VISITOR_POST& visitorPost;
int problemSizeThreshold; int problemSizeThreshold;
tbb::task_group& tg;
bool makeNewTasks; bool makeNewTasks;
bool isPostOrderPhase; // Keep track of order phase across multiple calls to the same functor
mutable bool isPostOrderPhase;
PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData, PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
bool makeNewTasks = true) tbb::task_group& tg, bool makeNewTasks = true)
: treeNode(treeNode), : treeNode(treeNode),
myData(myData), myData(myData),
visitorPre(visitorPre), visitorPre(visitorPre),
visitorPost(visitorPost), visitorPost(visitorPost),
problemSizeThreshold(problemSizeThreshold), problemSizeThreshold(problemSizeThreshold),
tg(tg),
makeNewTasks(makeNewTasks), makeNewTasks(makeNewTasks),
isPostOrderPhase(false) {} isPostOrderPhase(false) {}
tbb::task* execute() override void operator()() const
{ {
if(isPostOrderPhase) if(isPostOrderPhase)
{ {
// Run the post-order visitor since this task was recycled to run the post-order visitor // Run the post-order visitor since this task was recycled to run the post-order visitor
(void) visitorPost(treeNode, *myData); (void) visitorPost(treeNode, *myData);
return nullptr;
} }
else else
{ {
@ -71,14 +73,10 @@ namespace gtsam {
{ {
if(!treeNode->children.empty()) if(!treeNode->children.empty())
{ {
// Allocate post-order task as a continuation
isPostOrderPhase = true;
recycle_as_continuation();
bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold); bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);
tbb::task* firstChild = 0; // If we have child tasks, start subtasks and wait for them to complete
tbb::task_list childTasks; tbb::task_group ctg;
for(const boost::shared_ptr<NODE>& child: treeNode->children) for(const boost::shared_ptr<NODE>& child: treeNode->children)
{ {
// Process child in a subtask. Important: Run visitorPre before calling // Process child in a subtask. Important: Run visitorPre before calling
@ -86,37 +84,30 @@ namespace gtsam {
// allocated an extra child, this causes a TBB error. // allocated an extra child, this causes a TBB error.
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>( boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
tbb::scalable_allocator<DATA>(), visitorPre(child, *myData)); tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
tbb::task* childTask = ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost, problemSizeThreshold, ctg, overThreshold));
problemSizeThreshold, overThreshold);
if (firstChild)
childTasks.push_back(*childTask);
else
firstChild = childTask;
} }
ctg.wait();
// If we have child tasks, start subtasks and wait for them to complete // Allocate post-order task as a continuation
set_ref_count((int)treeNode->children.size()); isPostOrderPhase = true;
spawn(childTasks); tg.run(*this);
return firstChild;
} }
else else
{ {
// Run the post-order visitor in this task if we have no children // Run the post-order visitor in this task if we have no children
(void) visitorPost(treeNode, *myData); (void) visitorPost(treeNode, *myData);
return nullptr;
} }
} }
else else
{ {
// Process this node and its children in this task // Process this node and its children in this task
processNodeRecursively(treeNode, *myData); processNodeRecursively(treeNode, *myData);
return nullptr;
} }
} }
} }
void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
{ {
for(const boost::shared_ptr<NODE>& child: node->children) for(const boost::shared_ptr<NODE>& child: node->children)
{ {
@ -131,7 +122,7 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST> template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
class RootTask : public tbb::task class RootTask
{ {
public: public:
const ROOTS& roots; const ROOTS& roots;
@ -139,38 +130,31 @@ namespace gtsam {
VISITOR_PRE& visitorPre; VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost; VISITOR_POST& visitorPost;
int problemSizeThreshold; int problemSizeThreshold;
tbb::task_group& tg;
RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
int problemSizeThreshold) : int problemSizeThreshold, tbb::task_group& tg) :
roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost), roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
problemSizeThreshold(problemSizeThreshold) {} problemSizeThreshold(problemSizeThreshold), tg(tg) {}
tbb::task* execute() override void operator()() const
{ {
typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask; typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
// Create data and tasks for our children // Create data and tasks for our children
tbb::task_list tasks;
for(const boost::shared_ptr<NODE>& root: roots) for(const boost::shared_ptr<NODE>& root: roots)
{ {
boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData)); boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
tasks.push_back(*new(allocate_child()) tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
} }
// Set TBB ref count
set_ref_count(1 + (int) roots.size());
// Spawn tasks
spawn_and_wait_for_all(tasks);
// Return nullptr
return nullptr;
} }
}; };
template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST> template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>& void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
{ {
typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask; typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold); tbb::task_group tg;
} tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
}
} }

507
gtsam/basis/Basis.h Normal file
View File

@ -0,0 +1,507 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Basis.h
* @brief Compute an interpolating basis
* @author Varun Agrawal, Jing Dong, Frank Dellaert
* @date July 4, 2020
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <iostream>
/**
* This file supports creating continuous functions `f(x;p)` as a linear
* combination of `basis functions` such as the Fourier basis on SO(2) or a set
* of Chebyshev polynomials on [-1,1].
*
* In the expression `f(x;p)` the variable `x` is
* the continuous argument at which the function is evaluated, and `p` are
* the parameters which are coefficients of the different basis functions,
* e.g. p = [4; 3; 2] => 4 + 3x + 2x^2 for a polynomial.
* However, different parameterizations are also possible.
* The `Basis` class below defines a number of functors that can be used to
* evaluate `f(x;p)` at a given `x`, and these functors also calculate
* the Jacobian of `f(x;p)` with respect to the parameters `p`.
* This is actually the most important calculation, as it will allow GTSAM
* to optimize over the parameters `p`.
* This functionality is implemented using the `CRTP` or "Curiously recurring
* template pattern" C++ idiom, which is a meta-programming technique in which
* the derived class is passed as a template argument to `Basis<DERIVED>`.
* The DERIVED class is assumed to satisfy a C++ concept,
* i.e., we expect it to define the following types and methods:
- type `Parameters`: the parameters `p` in f(x;p)
- `CalculateWeights(size_t N, double x, double a=default, double b=default)`
- `DerivativeWeights(size_t N, double x, double a=default, double b=default)`
where `Weights` is an N*1 row vector which defines the basis values for the
polynomial at the specified point `x`.
E.g. A Fourier series would give the following:
- `CalculateWeights` -> For N=5, the values for the bases:
[1, cos(x), sin(x), cos(2x), sin(2x)]
- `DerivativeWeights` -> For N=5, these are:
[0, -sin(x), cos(x), -2sin(2x), 2cos(2x)]
Note that for a pseudo-spectral basis (as in Chebyshev2), the weights are
instead the values for the Barycentric interpolation formula, since the values
at the polynomial points (e.g. Chebyshev points) define the bases.
*/
namespace gtsam {
using Weights = Eigen::Matrix<double, 1, -1>; /* 1xN vector */
/**
 * @brief Efficient Kronecker product of the 1*N weight row vector `w` with
 * the MxM identity matrix, computed without pulling in Eigen's Unsupported
 * KroneckerProduct module.
 *
 * @tparam M Size of the identity matrix.
 * @param w The weights of the polynomial.
 * @return Mx(M*N) matrix [w(0)*I, w(1)*I, ..., w(N-1)*I]
 */
template <size_t M>
Matrix kroneckerProductIdentity(const Weights& w) {
  const auto N = w.cols();
  Matrix result = Matrix::Zero(M, N * M);
  for (int j = 0; j < N; j++) {
    // The j-th MxM block is w(j)*I: only its diagonal needs filling.
    result.block(0, j * M, M, M).diagonal().setConstant(w(j));
  }
  return result;
}
/**
 * CRTP Base class for function bases.
 *
 * The derived class DERIVED is expected to provide:
 *  - type `Parameters` (the coefficient vector `p` in f(x;p)), and
 *  - static `CalculateWeights` / `DerivativeWeights` returning 1xN `Weights`.
 * All functors below are built on top of those two weight computations.
 */
template <typename DERIVED>
class GTSAM_EXPORT Basis {
 public:
  /**
   * Calculate weights for all x in vector X.
   * Returns M*N matrix where M is the size of the vector X,
   * and N is the number of basis functions.
   */
  static Matrix WeightMatrix(size_t N, const Vector& X) {
    Matrix W(X.size(), N);
    for (int i = 0; i < X.size(); i++)
      W.row(i) = DERIVED::CalculateWeights(N, X(i));
    return W;
  }

  /**
   * @brief Calculate weights for all x in vector X, with interval [a,b].
   *
   * @param N The number of basis functions.
   * @param X The vector for which to compute the weights.
   * @param a The lower bound for the interval range.
   * @param b The upper bound for the interval range.
   * @return Returns M*N matrix where M is the size of the vector X.
   */
  static Matrix WeightMatrix(size_t N, const Vector& X, double a, double b) {
    Matrix W(X.size(), N);
    for (int i = 0; i < X.size(); i++)
      W.row(i) = DERIVED::CalculateWeights(N, X(i), a, b);
    return W;
  }

  /**
   * An instance of an EvaluationFunctor calculates f(x;p) at a given `x`,
   * applied to Parameters `p`.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given a specific N*1 vector of Parameters, returns the scalar
   * value of the function at x, possibly with Jacobians wrpt the parameters.
   */
  class EvaluationFunctor {
   protected:
    // 1xN row of basis weights, fixed at construction for a given x.
    Weights weights_;

   public:
    /// For serialization
    EvaluationFunctor() {}

    /// Constructor using the basis' default interval
    EvaluationFunctor(size_t N, double x)
        : weights_(DERIVED::CalculateWeights(N, x)) {}

    /// Constructor with interval [a,b]
    EvaluationFunctor(size_t N, double x, double a, double b)
        : weights_(DERIVED::CalculateWeights(N, x, a, b)) {}

    /// Regular 1D evaluation; H (if given) is the 1xN Jacobian wrpt p.
    double apply(const typename DERIVED::Parameters& p,
                 OptionalJacobian<-1, -1> H = boost::none) const {
      if (H) *H = weights_;
      return (weights_ * p)(0);
    }

    /// c++ sugar
    double operator()(const typename DERIVED::Parameters& p,
                      OptionalJacobian<-1, -1> H = boost::none) const {
      return apply(p, H);  // might call apply in derived
    }

    /// Print the stored weights, prefixed by s.
    void print(const std::string& s = "") const {
      std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
    }
  };

  /**
   * VectorEvaluationFunctor at a given x, applied to ParameterMatrix<M>.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given a specific M*N parameters, returns an M-vector the M
   * corresponding functions at x, possibly with Jacobians wrpt the parameters.
   */
  template <int M>
  class VectorEvaluationFunctor : protected EvaluationFunctor {
   protected:
    using VectorM = Eigen::Matrix<double, M, 1>;
    using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
    Jacobian H_;

    /**
     * Calculate the `M*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H =[ w(0) 0 w(1) 0 w(2) 0
     *      0 w(0) 0 w(1) 0 w(2) ]
     * i.e., the Kronecker product of weights_ with the MxM identity matrix.
     */
    void calculateJacobian() {
      H_ = kroneckerProductIdentity<M>(this->weights_);
    }

   public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    /// For serialization
    VectorEvaluationFunctor() {}

    /// Default Constructor
    VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) {
      calculateJacobian();
    }

    /// Constructor, with interval [a,b]
    VectorEvaluationFunctor(size_t N, double x, double a, double b)
        : EvaluationFunctor(N, x, a, b) {
      calculateJacobian();
    }

    /// M-dimensional evaluation: P * w^T, with precomputed Jacobian H_.
    VectorM apply(const ParameterMatrix<M>& P,
                  OptionalJacobian</*MxN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.matrix() * this->weights_.transpose();
    }

    /// c++ sugar
    VectorM operator()(const ParameterMatrix<M>& P,
                       OptionalJacobian</*MxN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Given a M*N Matrix of M-vectors at N polynomial points, an instance of
   * VectorComponentFunctor computes the N-vector value for a specific row
   * component of the M-vectors at all the polynomial points.
   *
   * This component is specified by the row index i, with 0<i<M.
   */
  template <int M>
  class VectorComponentFunctor : public EvaluationFunctor {
   protected:
    using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
    size_t rowIndex_;
    Jacobian H_;

    /*
     * Calculate the `1*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
     * H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
     * i.e., one row of the Kronecker product of weights_ with the
     * MxM identity matrix. See also VectorEvaluationFunctor.
     */
    void calculateJacobian(size_t N) {
      H_.setZero(1, M * N);
      for (int j = 0; j < EvaluationFunctor::weights_.size(); j++)
        H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j);
    }

   public:
    /// For serialization
    VectorComponentFunctor() {}

    /// Construct with row index
    VectorComponentFunctor(size_t N, size_t i, double x)
        : EvaluationFunctor(N, x), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Construct with row index and interval
    VectorComponentFunctor(size_t N, size_t i, double x, double a, double b)
        : EvaluationFunctor(N, x, a, b), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Calculate component of component rowIndex_ of P
    double apply(const ParameterMatrix<M>& P,
                 OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose();
    }

    /// c++ sugar
    double operator()(const ParameterMatrix<M>& P,
                      OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Manifold EvaluationFunctor at a given x, applied to ParameterMatrix<M>.
   * This functor is used to evaluate a parameterized function at a given scalar
   * value x. When given a specific M*N parameters, returns an M-vector the M
   * corresponding functions at x, possibly with Jacobians wrpt the parameters.
   *
   * The difference with the VectorEvaluationFunctor is that after computing the
   * M*1 vector xi=F(x;P), with x a scalar and P the M*N parameter vector, we
   * also retract xi back to the T manifold.
   * For example, if T==Rot3, then we first compute a 3-vector xi using x and P,
   * and then map that 3-vector xi back to the Rot3 manifold, yielding a valid
   * 3D rotation.
   */
  template <class T>
  class ManifoldEvaluationFunctor
      : public VectorEvaluationFunctor<traits<T>::dimension> {
    enum { M = traits<T>::dimension };
    using Base = VectorEvaluationFunctor<M>;

   public:
    /// For serialization
    ManifoldEvaluationFunctor() {}

    /// Default Constructor
    ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {}

    /// Constructor, with interval [a,b]
    ManifoldEvaluationFunctor(size_t N, double x, double a, double b)
        : Base(N, x, a, b) {}

    /// Manifold evaluation
    T apply(const ParameterMatrix<M>& P,
            OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      // Interpolate the M-dimensional vector to yield a vector in tangent space
      Eigen::Matrix<double, M, 1> xi = Base::operator()(P, H);

      // Now call retract with this M-vector, possibly with derivatives
      Eigen::Matrix<double, M, M> D_result_xi;
      T result = T::ChartAtOrigin::Retract(xi, H ? &D_result_xi : 0);

      // Finally, if derivatives are asked, apply chain rule where H is Mx(M*N)
      // derivative of interpolation and D_result_xi is MxM derivative of
      // retract.
      if (H) *H = D_result_xi * (*H);

      // and return a T
      return result;
    }

    /// c++ sugar
    T operator()(const ParameterMatrix<M>& P,
                 OptionalJacobian</*MxN*/ -1, -1> H = boost::none) const {
      return apply(P, H);  // might call apply in derived
    }
  };

  /// Base class for functors below that calculate derivative weights
  class DerivativeFunctorBase {
   protected:
    // 1xN row of derivative weights, fixed at construction for a given x.
    Weights weights_;

   public:
    /// For serialization
    DerivativeFunctorBase() {}

    /// Constructor using the basis' default interval
    DerivativeFunctorBase(size_t N, double x)
        : weights_(DERIVED::DerivativeWeights(N, x)) {}

    /// Constructor with interval [a,b]
    DerivativeFunctorBase(size_t N, double x, double a, double b)
        : weights_(DERIVED::DerivativeWeights(N, x, a, b)) {}

    /// Print the stored weights, prefixed by s.
    void print(const std::string& s = "") const {
      std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
    }
  };

  /**
   * An instance of a DerivativeFunctor calculates f'(x;p) at a given `x`,
   * applied to Parameters `p`.
   * When given a scalar value x and a specific N*1 vector of Parameters,
   * this functor returns the scalar derivative value of the function at x,
   * possibly with Jacobians wrpt the parameters.
   */
  class DerivativeFunctor : protected DerivativeFunctorBase {
   public:
    /// For serialization
    DerivativeFunctor() {}

    /// Constructor using the basis' default interval
    DerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {}

    /// Constructor with interval [a,b]
    DerivativeFunctor(size_t N, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b) {}

    /// Scalar derivative evaluation; H (if given) is the 1xN Jacobian wrpt p.
    double apply(const typename DERIVED::Parameters& p,
                 OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
      if (H) *H = this->weights_;
      return (this->weights_ * p)(0);
    }

    /// c++ sugar
    double operator()(const typename DERIVED::Parameters& p,
                      OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
      return apply(p, H);  // might call apply in derived
    }
  };

  /**
   * VectorDerivativeFunctor at a given x, applied to ParameterMatrix<M>.
   *
   * This functor is used to evaluate the derivatives of a parameterized
   * function at a given scalar value x. When given a specific M*N parameters,
   * returns an M-vector the M corresponding function derivatives at x, possibly
   * with Jacobians wrpt the parameters.
   */
  template <int M>
  class VectorDerivativeFunctor : protected DerivativeFunctorBase {
   protected:
    using VectorM = Eigen::Matrix<double, M, 1>;
    using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
    Jacobian H_;

    /**
     * Calculate the `M*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H =[ w(0) 0 w(1) 0 w(2) 0
     *      0 w(0) 0 w(1) 0 w(2) ]
     * i.e., the Kronecker product of weights_ with the MxM identity matrix.
     */
    void calculateJacobian() {
      H_ = kroneckerProductIdentity<M>(this->weights_);
    }

   public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    /// For serialization
    VectorDerivativeFunctor() {}

    /// Default Constructor
    VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {
      calculateJacobian();
    }

    /// Constructor, with optional interval [a,b]
    VectorDerivativeFunctor(size_t N, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b) {
      calculateJacobian();
    }

    /// M-dimensional derivative evaluation: P * w^T, with Jacobian H_.
    VectorM apply(const ParameterMatrix<M>& P,
                  OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.matrix() * this->weights_.transpose();
    }

    /// c++ sugar
    VectorM operator()(
        const ParameterMatrix<M>& P,
        OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  /**
   * Given a M*N Matrix of M-vectors at N polynomial points, an instance of
   * ComponentDerivativeFunctor computes the N-vector derivative for a specific
   * row component of the M-vectors at all the polynomial points.
   *
   * This component is specified by the row index i, with 0<i<M.
   */
  template <int M>
  class ComponentDerivativeFunctor : protected DerivativeFunctorBase {
   protected:
    using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
    size_t rowIndex_;
    Jacobian H_;

    /*
     * Calculate the `1*(M*N)` Jacobian of this functor with respect to
     * the M*N parameter matrix `P`.
     * We flatten assuming column-major order, e.g., if N=3 and M=2, we have
     * H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
     * H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
     * i.e., one row of the Kronecker product of weights_ with the
     * MxM identity matrix. See also VectorDerivativeFunctor.
     */
    void calculateJacobian(size_t N) {
      H_.setZero(1, M * N);
      for (int j = 0; j < this->weights_.size(); j++)
        H_(0, rowIndex_ + j * M) = this->weights_(j);
    }

   public:
    /// For serialization
    ComponentDerivativeFunctor() {}

    /// Construct with row index
    ComponentDerivativeFunctor(size_t N, size_t i, double x)
        : DerivativeFunctorBase(N, x), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Construct with row index and interval
    ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b)
        : DerivativeFunctorBase(N, x, a, b), rowIndex_(i) {
      calculateJacobian(N);
    }

    /// Calculate derivative of component rowIndex_ of F
    double apply(const ParameterMatrix<M>& P,
                 OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      if (H) *H = H_;
      return P.row(rowIndex_) * this->weights_.transpose();
    }

    /// c++ sugar
    double operator()(const ParameterMatrix<M>& P,
                      OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
      return apply(P, H);
    }
  };

  // Vector version for MATLAB :-(
  // NOTE(review): `DerivativeFunctor(x)` does not match any constructor shown
  // above (they take (N, x) or (N, x, a, b)); since member templates are only
  // instantiated when used, this compiles until called — confirm intended
  // usage before exposing it.
  static double Derivative(double x, const Vector& p,  //
                           OptionalJacobian</*1xN*/ -1, -1> H = boost::none) {
    return DerivativeFunctor(x)(p.transpose(), H);
  }
};
} // namespace gtsam

424
gtsam/basis/BasisFactors.h Normal file
View File

@ -0,0 +1,424 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file BasisFactors.h
* @brief Factor definitions for various Basis functors.
* @author Varun Agrawal
* @date July 4, 2020
**/
#pragma once
#include <gtsam/basis/Basis.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
namespace gtsam {
/**
 * @brief Factor for enforcing the scalar value of the polynomial BASIS
 * representation at `x` is the same as the measurement `z` when using a
 * pseudo-spectral parameterization.
 *
 * @tparam BASIS The basis class to use e.g. Chebyshev2
 */
template <class BASIS>
class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
 private:
  using Base = FunctorizedFactor<double, Vector>;
  // Functor evaluating the basis at a fixed point x; alias kept consistent
  // with the `Func` alias pattern used by VectorDerivativeFactor.
  using Func = typename BASIS::EvaluationFunctor;

 public:
  /// Default constructor, for serialization.
  EvaluationFactor() {}

  /**
   * @brief Construct a new EvaluationFactor object
   *
   * @param key Symbol for value to optimize.
   * @param z The measurement value.
   * @param model Noise model
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the polynomial.
   */
  EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Construct a new EvaluationFactor object
   *
   * @param key Symbol for value to optimize.
   * @param z The measurement value.
   * @param model Noise model
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~EvaluationFactor() {}
};
/**
 * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
 * of size (M, N) is equal to a vector-valued measurement at the same point,
 * when using a pseudo-spectral parameterization.
 *
 * This factor tries to enforce the basis function evaluation `f(x;p)` to take
 * on the value `z` at location `x`, providing a gradient on the parameters p.
 * In a probabilistic estimation context, `z` is known as a measurement, and the
 * parameterized basis function can be seen as a
 * measurement prediction function.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector.
 */
template <class BASIS, int M>
class GTSAM_EXPORT VectorEvaluationFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
  // Functor producing the M-vector f(x;P); alias kept consistent with the
  // `Func` alias pattern used by VectorDerivativeFactor.
  using Func = typename BASIS::template VectorEvaluationFunctor<M>;

 public:
  /// Default constructor, for serialization.
  VectorEvaluationFactor() {}

  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Construct a new VectorEvaluationFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorEvaluationFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~VectorEvaluationFactor() {}
};
/**
 * Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
 * of size (P, N) is equal to a specified measurement at the same point, when
 * using a pseudo-spectral parameterization.
 *
 * This factor is similar to `VectorEvaluationFactor` with the key difference
 * being that it only enforces the constraint for a single scalar in the vector,
 * indexed by `i`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the fixed-size vector.
 *
 * Example:
 *  VectorComponentFactor<BASIS, P> controlPrior(key, measured, model,
 *                                               N, i, t, a, b);
 *  where N is the degree and i is the component index.
 */
template <class BASIS, size_t P>
class GTSAM_EXPORT VectorComponentFactor
    : public FunctorizedFactor<double, ParameterMatrix<P>> {
 private:
  using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
  // Functor extracting component i of f(x;P); alias kept consistent with the
  // `Func` alias pattern used by VectorDerivativeFactor.
  using Func = typename BASIS::template VectorComponentFunctor<P>;

 public:
  /// Default constructor, for serialization.
  VectorComponentFactor() {}

  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x)
      : Base(key, z, model, Func(N, i, x)) {}

  /**
   * @brief Construct a new VectorComponentFactor object.
   *
   * @param key The key to the ParameterMatrix object used to represent the
   * polynomial.
   * @param z The scalar value at a specified index `i` of the full measurement
   * vector.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param i The index for the evaluated vector to give us the desired scalar
   * value.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
                        const size_t N, size_t i, double x, double a, double b)
      : Base(key, z, model, Func(N, i, x, a, b)) {}

  virtual ~VectorComponentFactor() {}
};
/**
 * For a measurement value of type T i.e. `T z = g(x)`, this unary
 * factor enforces that the polynomial basis, when evaluated at `x`, gives us
 * the same `z`, i.e. `T z = g(x) = f(x;p)`.
 *
 * This is done via computations on the tangent space of the
 * manifold of T.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param T: Object type which is synthesized by the provided functor.
 *
 * Example:
 *  ManifoldEvaluationFactor<Chebyshev2, Rot3> rotationFactor(key, measurement,
 *                                                            model, N, x, a, b);
 *
 * where `x` is the value (e.g. timestep) at which the rotation was evaluated.
 */
template <class BASIS, typename T>
class GTSAM_EXPORT ManifoldEvaluationFactor
    : public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
 private:
  using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
  // Functor evaluating the basis and retracting onto the manifold of T;
  // alias kept consistent with the `Func` pattern of VectorDerivativeFactor.
  using Func = typename BASIS::template ManifoldEvaluationFunctor<T>;

 public:
  /// Default constructor, for serialization.
  ManifoldEvaluationFactor() {}

  /**
   * @brief Construct a new ManifoldEvaluationFactor object.
   *
   * @param key Key for the state matrix parameterizing the function to estimate
   * via the BASIS.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which the estimated function is evaluated.
   */
  ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
                           const size_t N, double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Construct a new ManifoldEvaluationFactor object.
   *
   * @param key Key for the state matrix parameterizing the function to estimate
   * via the BASIS.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which the estimated function is evaluated.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
                           const size_t N, double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~ManifoldEvaluationFactor() {}
};
/**
 * A unary factor which enforces the evaluation of the derivative of a BASIS
 * polynomial at a specified point `x` is equal to the scalar measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 */
template <class BASIS>
class GTSAM_EXPORT DerivativeFactor
    : public FunctorizedFactor<double, typename BASIS::Parameters> {
 private:
  using Base = FunctorizedFactor<double, typename BASIS::Parameters>;
  // Functor evaluating the basis derivative at a fixed point x; alias kept
  // consistent with the `Func` alias pattern used by VectorDerivativeFactor.
  using Func = typename BASIS::DerivativeFunctor;

 public:
  /// Default constructor, for serialization.
  DerivativeFactor() {}

  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   */
  DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Construct a new DerivativeFactor object.
   *
   * @param key The key to the ParameterMatrix which represents the basis
   * polynomial.
   * @param z The measurement value.
   * @param model The noise model.
   * @param N The degree of the polynomial.
   * @param x The point at which to evaluate the basis polynomial.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
                   const size_t N, double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~DerivativeFactor() {}
};
/**
 * Unary factor constraining the derivative of a BASIS polynomial, evaluated
 * at a specified point `x`, to equal the vector measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param M: Size of the evaluated state vector derivative.
 */
template <class BASIS, int M>
class GTSAM_EXPORT VectorDerivativeFactor
    : public FunctorizedFactor<Vector, ParameterMatrix<M>> {
 private:
  using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
  using Func = typename BASIS::template VectorDerivativeFunctor<M>;

 public:
  VectorDerivativeFactor() {}

  /**
   * @brief Create a VectorDerivativeFactor over the basis' default domain.
   *
   * @param key Key of the ParameterMatrix describing the basis polynomial.
   * @param z Measured vector value of the derivative.
   * @param model Noise model for the measurement.
   * @param N Degree of the polynomial.
   * @param x Point at which the derivative is evaluated.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x)
      : Base(key, z, model, Func(N, x)) {}

  /**
   * @brief Create a VectorDerivativeFactor over the custom interval [a, b].
   *
   * @param key Key of the ParameterMatrix describing the basis polynomial.
   * @param z Measured vector value of the derivative.
   * @param model Noise model for the measurement.
   * @param N Degree of the polynomial.
   * @param x Point at which the derivative is evaluated.
   * @param a Lower bound for the polynomial.
   * @param b Upper bound for the polynomial.
   */
  VectorDerivativeFactor(Key key, const Vector &z,
                         const SharedNoiseModel &model, const size_t N,
                         double x, double a, double b)
      : Base(key, z, model, Func(N, x, a, b)) {}

  virtual ~VectorDerivativeFactor() {}
};
/**
 * A unary factor which enforces the evaluation of the derivative of a BASIS
 * polynomial is equal to the scalar value at a specific index `i` of a
 * vector-valued measurement `z`.
 *
 * @param BASIS: The basis class to use e.g. Chebyshev2
 * @param P: Size of the control component derivative.
 */
template <class BASIS, int P>
class GTSAM_EXPORT ComponentDerivativeFactor
: public FunctorizedFactor<double, ParameterMatrix<P>> {
private:
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
using Func = typename BASIS::template ComponentDerivativeFunctor<P>;
public:
ComponentDerivativeFactor() {}
/**
* @brief Construct a new ComponentDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* polynomial.
* @param z The scalar measurement value at a specific index `i` of the full
* measurement vector.
* @param model The noise model.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
* @param x The point at which to evaluate the basis polynomial.
*/
ComponentDerivativeFactor(Key key, const double &z,
const SharedNoiseModel &model, const size_t N,
size_t i, double x)
: Base(key, z, model, Func(N, i, x)) {}
/**
* @brief Construct a new ComponentDerivativeFactor object.
*
* @param key The key to the ParameterMatrix which represents the basis
* polynomial.
* @param z The scalar measurement value at a specific index `i` of the full
* measurement vector.
* @param model The noise model.
* @param N The degree of the polynomial.
* @param i The index for the evaluated vector to give us the desired scalar
* value.
* @param x The point at which to evaluate the basis polynomial.
* @param a Lower bound for the polynomial.
* @param b Upper bound for the polynomial.
*/
ComponentDerivativeFactor(Key key, const double &z,
const SharedNoiseModel &model, const size_t N,
size_t i, double x, double a, double b)
: Base(key, z, model, Func(N, i, x, a, b)) {}
virtual ~ComponentDerivativeFactor() {}
};
} // namespace gtsam

View File

@ -0,0 +1,6 @@
# Install headers
# NOTE(review): file(GLOB) is evaluated at configure time only — a header
# added later will not be installed until CMake is re-run.
file(GLOB basis_headers "*.h")
install(FILES ${basis_headers} DESTINATION include/gtsam/basis)
# Build tests
add_subdirectory(tests)

98
gtsam/basis/Chebyshev.cpp Normal file
View File

@ -0,0 +1,98 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Chebyshev.cpp
* @brief Chebyshev basis decompositions
* @author Varun Agrawal, Jing Dong, Frank Dellaert
* @date July 4, 2020
*/
#include <gtsam/basis/Chebyshev.h>
namespace gtsam {
/**
 * @brief Affine change of variables taking x in [a, b] to the point in
 * [t1, t2] with the same relative position (a maps to t1, b maps to t2).
 *
 * @param x Value to scale to new range.
 * @param a Original lower limit.
 * @param b Original upper limit.
 * @param t1 New lower limit.
 * @param t2 New upper limit.
 * @return double The mapped value.
 */
static double scale(double x, double a, double b, double t1, double t2) {
  // Stretch the offset from `a` by the ratio of interval lengths, then
  // shift by the new lower limit.
  const double stretched = (t2 - t1) * (x - a);
  return stretched / (b - a) + t1;
}
Weights Chebyshev1Basis::CalculateWeights(size_t N, double x, double a,
                                          double b) {
  // Row vector of the first-kind Chebyshev values T_0(x)..T_{N-1}(x).
  Weights Tx(1, N);
  x = scale(x, a, b, -1, 1);
  Tx(0) = 1;
  // Guard: with N == 1 only T_0 exists; writing Tx(1) below would be an
  // out-of-bounds access on the 1x1 weight vector.
  if (N == 1) return Tx;
  Tx(1) = x;
  for (size_t i = 2; i < N; i++) {
    // Three-term recurrence T_i = 2*x*T_{i-1} - T_{i-2};
    // instead of cos(i*acos(x)), this recurrence is much faster
    Tx(i) = 2 * x * Tx(i - 1) - Tx(i - 2);
  }
  return Tx;
}
Weights Chebyshev1Basis::DerivativeWeights(size_t N, double x, double a,
                                           double b) {
  // d/dx T_n(x) = n * U_{n-1}(x): second-kind values yield first-kind
  // derivatives, so evaluate the second-kind basis and shift/scale it.
  const Weights U = Chebyshev2Basis::CalculateWeights(N, x, a, b);
  Weights dT = Weights::Zero(N);
  // n starts at 1: the order-0 term is constant and has zero derivative.
  for (size_t n = 1; n < N; n++) {
    dT(n) = n * U(n - 1);
  }
  return dT;
}
Weights Chebyshev2Basis::CalculateWeights(size_t N, double x, double a,
                                          double b) {
  // Row vector of the second-kind Chebyshev values U_0(x)..U_{N-1}(x).
  Weights Ux(N);
  x = scale(x, a, b, -1, 1);
  Ux(0) = 1;
  // Guard: with N == 1 only U_0 exists; writing Ux(1) below would be an
  // out-of-bounds access on the length-1 weight vector.
  if (N == 1) return Ux;
  Ux(1) = 2 * x;
  for (size_t i = 2; i < N; i++) {
    // Three-term recurrence U_i = 2*x*U_{i-1} - U_{i-2};
    // instead of cos(i*acos(x)), this recurrence is much faster
    Ux(i) = 2 * x * Ux(i - 1) - Ux(i - 2);
  }
  return Ux;
}
Weights Chebyshev2Basis::DerivativeWeights(size_t N, double x, double a,
                                           double b) {
  // Map the evaluation point onto [-1, 1] and reject the endpoints first:
  // the closed form below divides by (x^2 - 1), which vanishes there.
  const double xs = scale(x, a, b, -1, 1);
  if (xs == -1 || xs == 1) {
    throw std::runtime_error(
        "Derivative of Chebyshev2 Basis does not exist at range limits.");
  }
  // First-kind values up to order N and second-kind values up to order N-1
  // feed the derivative identity for U_n.
  const Weights Tx = Chebyshev1Basis::CalculateWeights(N + 1, x, a, b);
  const Weights Ux = Chebyshev2Basis::CalculateWeights(N, x, a, b);
  Weights weights(N);
  for (size_t n = 0; n < N; n++) {
    weights(n) = ((n + 1) * Tx(n + 1) - xs * Ux(n)) / (xs * xs - 1);
  }
  return weights;
}
} // namespace gtsam

109
gtsam/basis/Chebyshev.h Normal file
View File

@ -0,0 +1,109 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Chebyshev.h
* @brief Chebyshev basis decompositions
* @author Varun Agrawal, Jing Dong, Frank Dellaert
* @date July 4, 2020
*/
#pragma once
#include <gtsam/base/Manifold.h>
#include <gtsam/basis/Basis.h>
#include <unsupported/Eigen/KroneckerProduct>
namespace gtsam {
/**
 * Basis of Chebyshev polynomials of the first kind
 * https://en.wikipedia.org/wiki/Chebyshev_polynomials#First_kind
 * These are typically denoted with the symbol T_n, where n is the degree.
 * The parameter N is the number of coefficients, i.e., N = n+1.
 */
struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
// NOTE(review): stored coefficient vector; the interface below is entirely
// static and Chebyshev2Basis has no such member — confirm it is still used.
Parameters parameters_;
/**
* @brief Evaluate Chebyshev Weights on [-1,1] at x up to order N-1 (N values)
*
* @param N Degree of the polynomial.
* @param x Point to evaluate polynomial at.
* @param a Lower limit of polynomial (default=-1).
* @param b Upper limit of polynomial (default=1).
*/
static Weights CalculateWeights(size_t N, double x, double a = -1,
double b = 1);
/**
* @brief Evaluate Chebyshev derivative at x.
* The derivative weights are pre-multiplied to the polynomial Parameters.
*
* From Wikipedia we have D[T_n(x),x] = n*U_{n-1}(x)
* I.e. the derivative of a first kind cheb is just a second kind cheb
* So, we define a second kind basis here of order N-1
* Note that it has one less weight.
*
* The Parameters pertain to 1st kind chebs up to order N-1
* But of course the first one (order 0) is constant, so omit that weight.
*
* @param N Degree of the polynomial.
* @param x Point to evaluate polynomial at.
* @param a Lower limit of polynomial (default=-1).
* @param b Upper limit of polynomial (default=1).
* @return Weights
*/
static Weights DerivativeWeights(size_t N, double x, double a = -1,
double b = 1);
}; // Chebyshev1Basis
/**
 * Basis of Chebyshev polynomials of the second kind.
 * https://en.wikipedia.org/wiki/Chebyshev_polynomials#Second_kind
 * These are typically denoted with the symbol U_n, where n is the degree.
 * The parameter N is the number of coefficients, i.e., N = n+1.
 * In contrast to the templates in Chebyshev2, the classes below specify
 * basis functions, weighted combinations of which are used to approximate
 * functions. In this sense, they are like the sines and cosines of the Fourier
 * basis.
 */
struct Chebyshev2Basis : Basis<Chebyshev2Basis> {
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
/**
* Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values).
*
* @param N Degree of the polynomial.
* @param x Point to evaluate polynomial at.
* @param a Lower limit of polynomial (default=-1).
* @param b Upper limit of polynomial (default=1).
*/
static Weights CalculateWeights(size_t N, double x, double a = -1,
double b = 1);
/**
* @brief Evaluate Chebyshev derivative at x.
* Note: undefined (throws) at the interval endpoints; see the .cpp.
*
* @param N Degree of the polynomial.
* @param x Point to evaluate polynomial at.
* @param a Lower limit of polynomial (default=-1).
* @param b Upper limit of polynomial (default=1).
* @return Weights
*/
static Weights DerivativeWeights(size_t N, double x, double a = -1,
double b = 1);
}; // Chebyshev2Basis
} // namespace gtsam

205
gtsam/basis/Chebyshev2.cpp Normal file
View File

@ -0,0 +1,205 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Chebyshev2.cpp
* @brief Chebyshev parameterizations on Chebyshev points of second kind
* @author Varun Agrawal, Jing Dong, Frank Dellaert
* @date July 4, 2020
*/
#include <gtsam/basis/Chebyshev2.h>
namespace gtsam {
// Barycentric interpolation weights at x: returns the row vector w such that
// f(x) ~ w * [f0;...;f_{N-1}] for function values f_j at the Chebyshev points.
Weights Chebyshev2::CalculateWeights(size_t N, double x, double a, double b) {
// Allocate space for weights
Weights weights(N);
// We start by getting distances from x to all Chebyshev points
// as well as getting smallest distance
Weights distances(N);
for (size_t j = 0; j < N; j++) {
const double dj =
x - Point(N, j, a, b);  // only thing that depends on [a,b]
if (std::abs(dj) < 1e-10) {
// exceptional case: x coincides with a Chebyshev point, so the
// interpolant reproduces that node value exactly (indicator weights)
weights.setZero();
weights(j) = 1;
return weights;
}
distances(j) = dj;
}
// Endpoint weights carry factor 1/2; interior weights alternate in sign.
// Beginning of interval, j = 0, x(0) = a
weights(0) = 0.5 / distances(0);
// All intermediate points j=1:N-2; `d` accumulates the normalizing sum
double d = weights(0), s = -1;  // changes sign s at every iteration
for (size_t j = 1; j < N - 1; j++, s = -s) {
weights(j) = s / distances(j);
d += weights(j);
}
// End of interval, j = N-1, x(N-1) = b
weights(N - 1) = 0.5 * s / distances(N - 1);
d += weights(N - 1);
// normalize so the weights sum to 1
return weights / d;
}
// Weights mapping node values [f0;...;f_{N-1}] to f'(x): the derivative of
// the barycentric interpolant evaluated at x.
Weights Chebyshev2::DerivativeWeights(size_t N, double x, double a, double b) {
// Allocate space for weights
Weights weightDerivatives(N);
// toggle variable so we don't need to use `pow` for -1
double t = -1;
// We start by getting distances from x to all Chebyshev points
// as well as getting smallest distance
Weights distances(N);
for (size_t j = 0; j < N; j++) {
const double dj =
x - Point(N, j, a, b);  // only thing that depends on [a,b]
if (std::abs(dj) < 1e-10) {
// exceptional case: x coincides with a Chebyshev point, so f'(x) is
// read off the differentiation matrix instead of the chain-rule form
weightDerivatives.setZero();
// compute the jth row of the differentiation matrix for this point
double cj = (j == 0 || j == N - 1) ? 2. : 1.;
for (size_t k = 0; k < N; k++) {
if (j == 0 && k == 0) {
// we reverse the sign since we order the cheb points from -1 to 1
weightDerivatives(k) = -(cj * (N - 1) * (N - 1) + 1) / 6.0;
} else if (j == N - 1 && k == N - 1) {
// we reverse the sign since we order the cheb points from -1 to 1
weightDerivatives(k) = (cj * (N - 1) * (N - 1) + 1) / 6.0;
} else if (k == j) {
// diagonal entry of the differentiation matrix
double xj = Point(N, j);
double xj2 = xj * xj;
weightDerivatives(k) = -0.5 * xj / (1 - xj2);
} else {
// off-diagonal entry; sign alternates with (j + k)
double xj = Point(N, j);
double xk = Point(N, k);
double ck = (k == 0 || k == N - 1) ? 2. : 1.;
t = ((j + k) % 2) == 0 ? 1 : -1;
weightDerivatives(k) = (cj / ck) * t / (xj - xk);
}
}
// the matrix row is for [-1,1]; rescale to the [a,b] interval
return 2 * weightDerivatives / (b - a);
}
distances(j) = dj;
}
// This section of code computes the derivative of
// the Barycentric Interpolation weights formula by applying
// the chain rule on the original formula.
// g and k are multiplier terms which represent the derivatives of
// the numerator and denominator
double g = 0, k = 0;
double w = 1;
for (size_t j = 0; j < N; j++) {
// endpoint weights carry the 1/2 factor, as in CalculateWeights
if (j == 0 || j == N - 1) {
w = 0.5;
} else {
w = 1.0;
}
t = (j % 2 == 0) ? 1 : -1;
double c = t / distances(j);
g += w * c;
k += (w * c / distances(j));
}
double s = 1;  // changes sign s at every iteration
double g2 = g * g;
for (size_t j = 0; j < N; j++, s = -s) {
// Beginning of interval, j = 0, x0 = -1.0 and end of interval, j = N-1,
// x0 = 1.0
if (j == 0 || j == N - 1) {
w = 0.5;
} else {
// All intermediate points j=1:N-2
w = 1.0;
}
// quotient-rule combination of the numerator and denominator derivatives
weightDerivatives(j) = (w * -s / (g * distances(j) * distances(j))) -
(w * -s * k / (g2 * distances(j)));
}
return weightDerivatives;
}
// NxN matrix D such that, for node values f at the Chebyshev points, D*f are
// the values of f' at those points (Trefethen00book p.53).
Chebyshev2::DiffMatrix Chebyshev2::DifferentiationMatrix(size_t N, double a,
double b) {
DiffMatrix D(N, N);
// trivial single-point case
if (N == 1) {
D(0, 0) = 1;
return D;
}
// toggle variable so we don't need to use `pow` for -1
double t = -1;
for (size_t i = 0; i < N; i++) {
double xi = Point(N, i);
// endpoint rows/columns carry the factor c = 2, interior ones c = 1
double ci = (i == 0 || i == N - 1) ? 2. : 1.;
for (size_t j = 0; j < N; j++) {
if (i == 0 && j == 0) {
// we reverse the sign since we order the cheb points from -1 to 1
D(i, j) = -(ci * (N - 1) * (N - 1) + 1) / 6.0;
} else if (i == N - 1 && j == N - 1) {
// we reverse the sign since we order the cheb points from -1 to 1
D(i, j) = (ci * (N - 1) * (N - 1) + 1) / 6.0;
} else if (i == j) {
// diagonal entry
double xi2 = xi * xi;
D(i, j) = -xi / (2 * (1 - xi2));
} else {
// off-diagonal entry; sign alternates with (i + j)
double xj = Point(N, j);
double cj = (j == 0 || j == N - 1) ? 2. : 1.;
t = ((i + j) % 2) == 0 ? 1 : -1;
D(i, j) = (ci / cj) * t / (xi - xj);
}
}
}
// scale the matrix to the range
return D / ((b - a) / 2.0);
}
// Clenshaw-Curtis quadrature weights: w * [f0;...;f_{N-1}] approximates the
// integral of f over [a,b] (port of clencurt.m; see the header comment).
Weights Chebyshev2::IntegrationWeights(size_t N, double a, double b) {
// Allocate space for weights
Weights weights(N);
size_t K = N - 1,  // number of intervals between N points
K2 = K * K;
// endpoint weights (even-K branch of clencurt.m; see header)
weights(0) = 0.5 * (b - a) / (K2 + K % 2 - 1);
weights(N - 1) = weights(0);
// truncation index for the cosine series below
size_t last_k = K / 2 + K % 2 - 1;
for (size_t i = 1; i <= N - 2; ++i) {
double theta = i * M_PI / K;
// interior weight: cosine series evaluated at theta_i
weights(i) = (K % 2 == 0) ? 1 - cos(K * theta) / (K2 - 1) : 1;
for (size_t k = 1; k <= last_k; ++k)
weights(i) -= 2 * cos(2 * k * theta) / (4 * k * k - 1);
weights(i) *= (b - a) / K;
}
return weights;
}
} // namespace gtsam

148
gtsam/basis/Chebyshev2.h Normal file
View File

@ -0,0 +1,148 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Chebyshev2.h
* @brief Pseudo-spectral parameterization for Chebyshev polynomials of the
* second kind.
*
* In a pseudo-spectral case, rather than the parameters acting as
* weights for the bases polynomials (as in Chebyshev2Basis), here the
* parameters are the *values* at a specific set of points in the interval, the
* "Chebyshev points". These values uniquely determine the polynomial that
* interpolates them at the Chebyshev points.
*
* This is different from Chebyshev.h since it leverage ideas from
* pseudo-spectral optimization, i.e. we don't decompose into basis functions,
* rather estimate function parameters that enforce function nodes at Chebyshev
* points.
*
* Please refer to Agrawal21icra for more details.
*
* @author Varun Agrawal, Jing Dong, Frank Dellaert
* @date July 4, 2020
*/
#pragma once
#include <gtsam/base/Manifold.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/basis/Basis.h>
#include <boost/function.hpp>
namespace gtsam {
/**
 * Chebyshev Interpolation on Chebyshev points of the second kind
 * Note that N here, the number of points, is one less than N from
 * 'Approximation Theory and Approximation Practice by L. N. Trefethen (pg.42)'.
 */
class GTSAM_EXPORT Chebyshev2 : public Basis<Chebyshev2> {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  using Base = Basis<Chebyshev2>;
  using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
  using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;

  /// Specific Chebyshev point on [-1, 1], ordered from -1 to +1.
  static double Point(size_t N, int j) {
    assert(j >= 0 && size_t(j) < N);
    const double dtheta = M_PI / (N > 1 ? (N - 1) : 1);
    // We add -PI so that we get values ordered from -1 to +1
    // sin(- M_PI_2 + dtheta*j); also works
    return cos(-M_PI + dtheta * j);
  }

  /// Specific Chebyshev point, within [a,b] interval
  static double Point(size_t N, int j, double a, double b) {
    assert(j >= 0 && size_t(j) < N);
    // Guard N == 1 like the overload above: without it, M_PI / (N - 1)
    // divides by zero (inf) and dtheta * j evaluates to NaN for j = 0.
    const double dtheta = M_PI / (N > 1 ? (N - 1) : 1);
    // We add -PI so that we get values ordered from -1 to +1
    return a + (b - a) * (1. + cos(-M_PI + dtheta * j)) / 2;
  }

  /// All Chebyshev points
  static Vector Points(size_t N) {
    Vector points(N);
    for (size_t j = 0; j < N; j++) points(j) = Point(N, j);
    return points;
  }

  /// All Chebyshev points, within [a,b] interval
  static Vector Points(size_t N, double a, double b) {
    Vector points = Points(N);
    // affine map from [-1, 1] onto [a, b]
    const double T1 = (a + b) / 2, T2 = (b - a) / 2;
    points = T1 + (T2 * points).array();
    return points;
  }

  /**
   * Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values)
   * These weights implement barycentric interpolation at a specific x.
   * More precisely, f(x) ~ [w0;...;wN] * [f0;...;fN], where the fj are the
   * values of the function f at the Chebyshev points. As such, for a given x we
   * obtain a linear map from parameter vectors f to interpolated values f(x).
   * Optional [a,b] interval can be specified as well.
   */
  static Weights CalculateWeights(size_t N, double x, double a = -1,
                                  double b = 1);

  /**
   * Evaluate derivative of barycentric weights.
   * This is easy and efficient via the DifferentiationMatrix.
   */
  static Weights DerivativeWeights(size_t N, double x, double a = -1,
                                   double b = 1);

  /// compute D = differentiation matrix, Trefethen00book p.53
  /// when given a parameter vector f of function values at the Chebyshev
  /// points, D*f are the values of f'.
  /// https://people.maths.ox.ac.uk/trefethen/8all.pdf Theorem 8.4
  static DiffMatrix DifferentiationMatrix(size_t N, double a = -1,
                                          double b = 1);

  /**
   * Evaluate Clenshaw-Curtis integration weights.
   * Trefethen00book, pg 128, clencurt.m
   * Note that N in clencurt.m is 1 less than our N
   * K = N-1;
   theta = pi*(0:K)'/K;
   w = zeros(1,N); ii = 2:K; v = ones(K-1, 1);
   if mod(K,2) == 0
   w(1) = 1/(K^2-1); w(N) = w(1);
   for k=1:K/2-1, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
   v = v - cos(K*theta(ii))/(K^2-1);
   else
   w(1) = 1/K^2; w(N) = w(1);
   for k=1:K/2, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
   end
   w(ii) = 2*v/K;
   */
  static Weights IntegrationWeights(size_t N, double a = -1, double b = 1);

  /**
   * Create matrix of values at Chebyshev points given vector-valued function.
   */
  template <size_t M>
  static Matrix matrix(boost::function<Eigen::Matrix<double, M, 1>(double)> f,
                       size_t N, double a = -1, double b = 1) {
    Matrix Xmat(M, N);
    // column j holds f evaluated at the j-th Chebyshev point of [a, b]
    for (size_t j = 0; j < N; j++) {
      Xmat.col(j) = f(Point(N, j, a, b));
    }
    return Xmat;
  }
};  // \ Chebyshev2
} // namespace gtsam

99
gtsam/basis/FitBasis.h Normal file
View File

@ -0,0 +1,99 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file FitBasis.h
* @date July 4, 2020
* @author Varun Agrawal, Frank Dellaert
* @brief Fit a Basis using least-squares
*/
/*
* Concept needed for LS. Parameters = Coefficients | Values
* - Parameters, Jacobian
* - PredictFactor(double x)(Parameters p, OptionalJacobian<1,N> H)
*/
#pragma once
#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
namespace gtsam {
/// Our sequence representation is a map of {x: y} values where y = f(x)
using Sequence = std::map<double, double>;
/// A sample is a key-value pair from a sequence.
using Sample = std::pair<double, double>;
/**
 * Class that does regression via least squares
 * Example usage:
 *  size_t N = 3;
 *  auto fit = FitBasis<Chebyshev2>(data_points, noise_model, N);
 *  Vector coefficients = fit.parameters();
 *
 * where `data_points` are a map from `x` to `y` values indicating a function
 * mapping at specific points, `noise_model` is the gaussian noise model, and
 * `N` is the degree of the polynomial basis used to fit the function.
 */
template <class Basis>
class FitBasis {
 public:
  using Parameters = typename Basis::Parameters;

 private:
  Parameters parameters_;  // solution of the least-squares fit

 public:
  /// Create nonlinear FG from Sequence: one EvaluationFactor per (x, y) pair,
  /// all tied to the single parameter key 0.
  static NonlinearFactorGraph NonlinearGraph(const Sequence& sequence,
                                             const SharedNoiseModel& model,
                                             size_t N) {
    NonlinearFactorGraph graph;
    // Iterate by const reference: `const Sample sample` forced a converting
    // copy of every map entry (map::value_type has a const key and is not
    // exactly Sample).
    for (const auto& sample : sequence) {
      graph.emplace_shared<EvaluationFactor<Basis>>(0, sample.second, model, N,
                                                    sample.first);
    }
    return graph;
  }

  /// Create linear FG from Sequence by linearizing the nonlinear graph at
  /// zero parameters (the factors are linear in the parameters).
  static GaussianFactorGraph::shared_ptr LinearGraph(
      const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
    NonlinearFactorGraph graph = NonlinearGraph(sequence, model, N);
    Values values;
    values.insert<Parameters>(0, Parameters::Zero(N));
    GaussianFactorGraph::shared_ptr gfg = graph.linearize(values);
    return gfg;
  }

  /**
   * @brief Construct a new FitBasis object.
   *
   * @param sequence map of x->y values for a function, a.k.a. y = f(x).
   * @param model The noise model to use.
   * @param N The degree of the polynomial to fit.
   */
  FitBasis(const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
    GaussianFactorGraph::shared_ptr gfg = LinearGraph(sequence, model, N);
    VectorValues solution = gfg->optimize();
    parameters_ = solution.at(0);
  }

  /// Return fitted parameters, e.g. Fourier/Chebyshev coefficients.
  Parameters parameters() const { return parameters_; }
};
} // namespace gtsam

112
gtsam/basis/Fourier.h Normal file
View File

@ -0,0 +1,112 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Fourier.h
* @brief Fourier decomposition, see e.g.
* http://mathworld.wolfram.com/FourierSeries.html
* @author Varun Agrawal, Frank Dellaert
* @date July 4, 2020
*/
#pragma once
#include <gtsam/basis/Basis.h>
namespace gtsam {
/// Fourier basis
class GTSAM_EXPORT FourierBasis : public Basis<FourierBasis> {
 public:
  using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
  using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;

  /**
   * @brief Evaluate Real Fourier Weights of size N at point x,
   * e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
   *
   * @param N The degree of the polynomial to use.
   * @param x The point at which to compute the weights.
   * @return Weights
   */
  static Weights CalculateWeights(size_t N, double x) {
    Weights b(N);
    b[0] = 1;  // constant basis function
    // Odd indices hold cos(n*x), even indices sin(n*x); the harmonic n
    // advances after each completed (cos, sin) pair.
    for (size_t i = 1, n = 1; i < N; i++) {
      if (i % 2 == 1) {
        b[i] = cos(n * x);
      } else {
        b[i] = sin(n * x);
        n++;
      }
    }
    return b;
  }

  /**
   * @brief Evaluate Real Fourier Weights of size N in interval [a, b],
   * e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
   *
   * @param N The degree of the polynomial to use.
   * @param x The point at which to compute the weights.
   * @param a Lower bound of interval.
   * @param b Upper bound of interval.
   * @return Weights
   */
  static Weights CalculateWeights(size_t N, double x, double a, double b) {
    // TODO(Varun) How do we enforce an interval for Fourier series?
    return CalculateWeights(N, x);
  }

  /**
   * Compute D = differentiation matrix.
   * Given coefficients c of a Fourier series c, D*c are the values of c'.
   */
  static DiffMatrix DifferentiationMatrix(size_t N) {
    DiffMatrix D = DiffMatrix::Zero(N, N);
    double k = 1;
    // Process complete (cos, sin) pairs. The bound `i + 1 < N` (instead of
    // `i < N`) prevents an out-of-bounds write at D(i, i + 1) when N is
    // even, i.e. when the final cos term has no matching sin term; that
    // dangling term's derivative entries are left zero.
    for (size_t i = 1; i + 1 < N; i += 2) {
      D(i, i + 1) = k;   // sin'(k*x) = k*cos(k*x)
      D(i + 1, i) = -k;  // cos'(k*x) = -k*sin(k*x)
      k += 1;
    }
    return D;
  }

  /**
   * @brief Get weights at a given x that calculate the derivative.
   *
   * @param N The degree of the polynomial to use.
   * @param x The point at which to compute the derivative weights.
   * @return Weights
   */
  static Weights DerivativeWeights(size_t N, double x) {
    return CalculateWeights(N, x) * DifferentiationMatrix(N);
  }

  /**
   * @brief Get derivative weights at a given x that calculate the derivative,
     in the interval [a, b].
   *
   * @param N The degree of the polynomial to use.
   * @param x The point at which to compute the derivative weights.
   * @param a Lower bound of interval.
   * @param b Upper bound of interval.
   * @return Weights
   */
  static Weights DerivativeWeights(size_t N, double x, double a, double b) {
    return CalculateWeights(N, x, a, b) * DifferentiationMatrix(N);
  }
};  // FourierBasis
} // namespace gtsam

View File

@ -0,0 +1,215 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ParameterMatrix.h
* @brief Define ParameterMatrix class which is used to store values at
* interpolation points.
* @author Varun Agrawal, Frank Dellaert
* @date September 21, 2020
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/VectorSpace.h>
#include <iostream>
namespace gtsam {
/**
 * A matrix abstraction of MxN values at the Basis points.
 * This class serves as a wrapper over an Eigen matrix.
 * @tparam M: The dimension of the type you wish to evaluate.
 * @param N: the number of Basis points (e.g. Chebyshev points of the second
 * kind).
 */
template <int M>
class ParameterMatrix {
using MatrixType = Eigen::Matrix<double, M, -1>;
private:
MatrixType matrix_;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// Tangent-space dimension is only known at runtime (N columns), hence
// Eigen::Dynamic; see dim() below.
enum { dimension = Eigen::Dynamic };
/**
* Create ParameterMatrix using the number of basis points.
* The matrix is zero-initialized.
* @param N: The number of basis points (the columns).
*/
ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); }
/**
* Create ParameterMatrix from an MxN Eigen Matrix.
* @param matrix: An Eigen matrix used to initialze the ParameterMatrix.
*/
ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {}
/// Get the number of rows.
size_t rows() const { return matrix_.rows(); }
/// Get the number of columns.
size_t cols() const { return matrix_.cols(); }
/// Get the underlying matrix (by value).
MatrixType matrix() const { return matrix_; }
/// Return the tranpose of the underlying matrix.
Eigen::Matrix<double, -1, M> transpose() const { return matrix_.transpose(); }
/**
* Get the matrix row specified by `index` (by value).
* @param index: The row index to retrieve.
*/
Eigen::Matrix<double, 1, -1> row(size_t index) const {
return matrix_.row(index);
}
/**
* Set the matrix row specified by `index`.
* Returns a writable Eigen block referencing the row.
* @param index: The row index to set.
*/
auto row(size_t index) -> Eigen::Block<MatrixType, 1, -1, false> {
return matrix_.row(index);
}
/**
* Get the matrix column specified by `index` (by value).
* @param index: The column index to retrieve.
*/
Eigen::Matrix<double, M, 1> col(size_t index) const {
return matrix_.col(index);
}
/**
* Set the matrix column specified by `index`.
* Returns a writable Eigen block referencing the column.
* @param index: The column index to set.
*/
auto col(size_t index) -> Eigen::Block<MatrixType, M, 1, true> {
return matrix_.col(index);
}
/**
* Set all matrix coefficients to zero.
*/
void setZero() { matrix_.setZero(); }
/**
* Add a ParameterMatrix to another.
* @param other: ParameterMatrix to add.
*/
ParameterMatrix<M> operator+(const ParameterMatrix<M>& other) const {
return ParameterMatrix<M>(matrix_ + other.matrix());
}
/**
* Add a MxN-sized vector to the ParameterMatrix.
* NOTE(review): assumes other.size() == M * cols() — the Map below reads
* exactly that many coefficients; confirm callers guarantee this.
* @param other: Vector which is reshaped and added.
*/
ParameterMatrix<M> operator+(
const Eigen::Matrix<double, -1, 1>& other) const {
// This form avoids a deep copy and instead typecasts `other`.
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
return ParameterMatrix<M>(matrix_ + other_);
}
/**
* Subtract a ParameterMatrix from another.
* @param other: ParameterMatrix to subtract.
*/
ParameterMatrix<M> operator-(const ParameterMatrix<M>& other) const {
return ParameterMatrix<M>(matrix_ - other.matrix());
}
/**
* Subtract a MxN-sized vector from the ParameterMatrix.
* NOTE(review): same size assumption as operator+ above.
* @param other: Vector which is reshaped and subracted.
*/
ParameterMatrix<M> operator-(
const Eigen::Matrix<double, -1, 1>& other) const {
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
return ParameterMatrix<M>(matrix_ - other_);
}
/**
* Multiply ParameterMatrix with an Eigen matrix.
* @param other: Eigen matrix which should be multiplication compatible with
* the ParameterMatrix.
*/
MatrixType operator*(const Eigen::Matrix<double, -1, -1>& other) const {
return matrix_ * other;
}
/// @name Vector Space requirements, following LieMatrix
/// @{
/**
* Print the ParameterMatrix.
* @param s: The prepend string to add more contextual info.
*/
void print(const std::string& s = "") const {
std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl;
}
/**
* Check for equality up to absolute tolerance.
* @param other: The ParameterMatrix to check equality with.
* @param tol: The absolute tolerance threshold.
*/
bool equals(const ParameterMatrix<M>& other, double tol = 1e-8) const {
return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol);
}
/// Returns dimensionality of the tangent space
inline size_t dim() const { return matrix_.size(); }
/// Convert to vector form, is done row-wise
inline Vector vector() const {
using RowMajor = Eigen::Matrix<double, -1, -1, Eigen::RowMajor>;
Vector result(matrix_.size());
Eigen::Map<RowMajor>(&result(0), rows(), cols()) = matrix_;
return result;
}
/** Identity function to satisfy VectorSpace traits.
*
* NOTE: The size at compile time is unknown so this identity is zero
* length and thus not valid.
*/
inline static ParameterMatrix identity() {
// Deliberately returns a 0-column matrix rather than throwing; callers
// relying on VectorSpace traits must not use this value directly.
return ParameterMatrix(0);
}
/// @}
};
// traits for ParameterMatrix: registers it with gtsam's VectorSpace traits
// machinery so it can participate in the generic vector-space operations.
template <int M>
struct traits<ParameterMatrix<M>>
    : public internal::VectorSpace<ParameterMatrix<M>> {};
/* ************************************************************************* */
// Stream insertion operator for ParameterMatrix; forwards to the underlying
// Eigen matrix. Used for printing.
template <int M>
inline std::ostream& operator<<(std::ostream& os,
                                const ParameterMatrix<M>& parameterMatrix) {
  return os << parameterMatrix.matrix();
}
} // namespace gtsam

146
gtsam/basis/basis.i Normal file
View File

@ -0,0 +1,146 @@
//*************************************************************************
// basis
//*************************************************************************

namespace gtsam {

// TODO(gerry): add all the Functors to the Basis interfaces, e.g.
// `EvaluationFunctor`

#include <gtsam/basis/Fourier.h>

// Fourier basis: weight and differentiation helpers, parameterized by the
// number of basis functions N and evaluation point x.
class FourierBasis {
  static Vector CalculateWeights(size_t N, double x);
  static Matrix WeightMatrix(size_t N, Vector x);

  static Matrix DifferentiationMatrix(size_t N);
  static Vector DerivativeWeights(size_t N, double x);
};

#include <gtsam/basis/Chebyshev.h>

// Chebyshev basis of the first kind.
class Chebyshev1Basis {
  static Matrix CalculateWeights(size_t N, double x);
  static Matrix WeightMatrix(size_t N, Vector X);
};

// Chebyshev basis of the second kind.
class Chebyshev2Basis {
  static Matrix CalculateWeights(size_t N, double x);
  static Matrix WeightMatrix(size_t N, Vector x);
};

#include <gtsam/basis/Chebyshev2.h>

// Pseudo-spectral parameterization on Chebyshev points; the overloads taking
// (a, b) work on a custom interval instead of the default one.
class Chebyshev2 {
  static double Point(size_t N, int j);
  static double Point(size_t N, int j, double a, double b);

  static Vector Points(size_t N);
  static Vector Points(size_t N, double a, double b);

  static Matrix WeightMatrix(size_t N, Vector X);
  static Matrix WeightMatrix(size_t N, Vector X, double a, double b);

  static Matrix CalculateWeights(size_t N, double x, double a, double b);
  static Matrix DerivativeWeights(size_t N, double x, double a, double b);
  static Matrix IntegrationWeights(size_t N, double a, double b);
  static Matrix DifferentiationMatrix(size_t N, double a, double b);

  // TODO Needs OptionalJacobian
  // static double Derivative(double x, Vector f);
};

#include <gtsam/basis/ParameterMatrix.h>

// M x N matrix of basis-function parameters; instantiated for M = 1..15.
template <M = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}>
class ParameterMatrix {
  ParameterMatrix(const size_t N);
  ParameterMatrix(const Matrix& matrix);

  Matrix matrix() const;

  void print(const string& s = "") const;
};

#include <gtsam/basis/BasisFactors.h>

// Factor relating a scalar measurement z to basis parameters evaluated at x;
// the (a, b) overload evaluates on a custom interval.
template <BASIS = {gtsam::Chebyshev2, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::FourierBasis}>
virtual class EvaluationFactor : gtsam::NoiseModelFactor {
  EvaluationFactor();
  EvaluationFactor(gtsam::Key key, const double z,
                   const gtsam::noiseModel::Base* model, const size_t N,
                   double x);
  EvaluationFactor(gtsam::Key key, const double z,
                   const gtsam::noiseModel::Base* model, const size_t N,
                   double x, double a, double b);
};

// Factor relating a vector measurement z (dimension M) to basis parameters.
template <BASIS, M>
virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor {
  VectorEvaluationFactor();
  VectorEvaluationFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t N,
                         double x);
  VectorEvaluationFactor(gtsam::Key key, const Vector& z,
                         const gtsam::noiseModel::Base* model, const size_t N,
                         double x, double a, double b);
};

// TODO(Varun) Better way to support arbitrary dimensions?
// Especially if users mainly do `pip install gtsam` for the Python wrapper.
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 3>
    VectorEvaluationFactorChebyshev2D3;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 4>
    VectorEvaluationFactorChebyshev2D4;
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 12>
    VectorEvaluationFactorChebyshev2D12;

// Factor on component i of a vector-valued function of dimension P.
template <BASIS, P>
virtual class VectorComponentFactor : gtsam::NoiseModelFactor {
  VectorComponentFactor();
  VectorComponentFactor(gtsam::Key key, const double z,
                        const gtsam::noiseModel::Base* model, const size_t N,
                        size_t i, double x);
  VectorComponentFactor(gtsam::Key key, const double z,
                        const gtsam::noiseModel::Base* model, const size_t N,
                        size_t i, double x, double a, double b);
};

typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 3>
    VectorComponentFactorChebyshev2D3;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 4>
    VectorComponentFactorChebyshev2D4;
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 12>
    VectorComponentFactorChebyshev2D12;

// Factor relating a manifold-valued measurement z of type T to basis
// parameters.
template <BASIS, T>
virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
  ManifoldEvaluationFactor();
  ManifoldEvaluationFactor(gtsam::Key key, const T& z,
                           const gtsam::noiseModel::Base* model, const size_t N,
                           double x);
  ManifoldEvaluationFactor(gtsam::Key key, const T& z,
                           const gtsam::noiseModel::Base* model, const size_t N,
                           double x, double a, double b);
};

// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and
// `ComponentDerivativeFactor`

#include <gtsam/basis/FitBasis.h>

// Fit N basis parameters to a sequence of (x, y) samples under the given
// noise model.
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
                   gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
class FitBasis {
  FitBasis(const std::map<double, double>& sequence,
           const gtsam::noiseModel::Base* model, size_t N);

  static gtsam::NonlinearFactorGraph NonlinearGraph(
      const std::map<double, double>& sequence,
      const gtsam::noiseModel::Base* model, size_t N);
  static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
      const std::map<double, double>& sequence,
      const gtsam::noiseModel::Base* model, size_t N);
  Parameters parameters() const;
};

}  // namespace gtsam

View File

@ -0,0 +1 @@
gtsamAddTestsGlob(basis "test*.cpp" "" "gtsam")

View File

@ -0,0 +1,236 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testChebyshev.cpp
* @date July 4, 2020
* @author Varun Agrawal
* @brief Unit tests for Chebyshev Basis Decompositions
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/nonlinear/factorTesting.h>
using namespace std;
using namespace gtsam;
// Unit noise model and default number of basis parameters used by the tests.
auto model = noiseModel::Unit::Create(1);
const size_t N = 3;

//******************************************************************************
// Evaluate a 3-parameter first-kind expansion at several points and compare
// against the closed form 12*T0 + 3*T1 + 1*T2 = 12 + 3x + (2x^2 - 1).
TEST(Chebyshev, Chebyshev1) {
  using Synth = Chebyshev1Basis::EvaluationFunctor;
  Vector c(N);
  double x;
  c << 12, 3, 1;
  x = -1.0;
  EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
  x = -0.5;
  EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
  x = 0.3;
  EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
}

//******************************************************************************
// Same check for the second-kind basis:
// 12*U0 + 3*U1 + 1*U2 = 12 + 6x + (4x^2 - 1).
TEST(Chebyshev, Chebyshev2) {
  using Synth = Chebyshev2Basis::EvaluationFunctor;
  Vector c(N);
  double x;
  c << 12, 3, 1;
  x = -1.0;
  EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
  x = -0.5;
  EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
  x = 0.3;
  EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
}

//******************************************************************************
// Single-point evaluation of a first-kind expansion at x = 0.5.
TEST(Chebyshev, Evaluation) {
  Chebyshev1Basis::EvaluationFunctor fx(N, 0.5);
  Vector c(N);
  c << 3, 5, -12;
  EXPECT_DOUBLES_EQUAL(11.5, fx(c), 1e-9);
}
//******************************************************************************
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/Marginals.h>

// Fit a 3-parameter first-kind expansion to 6 noiseless samples of y = x,
// then check the recovered coefficients, the marginal covariance, and the
// predictive value and variance at x = 1.0.
TEST(Chebyshev, Expression) {
  // Create linear factor graph
  NonlinearFactorGraph graph;
  Key key(1);

  // Let's pretend we have 6 GPS measurements (we just do x coordinate)
  // at times
  const size_t m = 6;
  Vector t(m);
  t << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;
  Vector x(m);
  x << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;

  for (size_t i = 0; i < m; i++) {
    graph.emplace_shared<EvaluationFactor<Chebyshev1Basis>>(key, x(i), model, N,
                                                            t(i));
  }

  // Solve
  Values initial;
  initial.insert<Vector>(key, Vector::Zero(N));  // initial does not matter

  // ... and optimize
  GaussNewtonParams parameters;
  GaussNewtonOptimizer optimizer(graph, initial, parameters);
  Values result = optimizer.optimize();

  // Check: y = x corresponds exactly to coefficients (0, 1, 0)
  Vector expected(N);
  expected << 0, 1, 0;
  Vector actual_c = result.at<Vector>(key);
  EXPECT(assert_equal(expected, actual_c));

  // Calculate and print covariances (regression value)
  Marginals marginals(graph, result);
  Matrix3 cov = marginals.marginalCovariance(key);
  EXPECT_DOUBLES_EQUAL(0.626, cov(1, 1), 1e-3);

  // Predict x at time 1.0
  Chebyshev1Basis::EvaluationFunctor f(N, 1.0);
  Matrix H;
  double actual = f(actual_c, H);
  EXPECT_DOUBLES_EQUAL(1.0, actual, 1e-9);

  // Calculate predictive variance on prediction (regression value)
  double actual_variance_on_prediction = (H * cov * H.transpose())(0);
  EXPECT_DOUBLES_EQUAL(1.1494, actual_variance_on_prediction, 1e-4);
}
//******************************************************************************
// Fit y = x over 16 samples with FitBasis and check that only the linear
// coefficient is recovered.
TEST(Chebyshev, Decomposition) {
  const size_t M = 16;

  // Create example sequence
  Sequence sequence;
  for (size_t i = 0; i < M; i++) {
    double x = ((double)i / M);  // - 0.99;
    double y = x;
    sequence[x] = y;
  }

  // Do Chebyshev Decomposition
  FitBasis<Chebyshev1Basis> actual(sequence, model, N);

  // Check
  Vector expected = Vector::Zero(N);
  expected(1) = 1;
  EXPECT(assert_equal(expected, (Vector)actual.parameters(), 1e-4));
}

//******************************************************************************
// First-kind DerivativeWeights at several points (regression values).
TEST(Chebyshev1, Derivative) {
  Vector c(N);
  c << 12, 3, 2;

  Weights D;

  double x = -1.0;
  D = Chebyshev1Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(-5, (D * c)(0), 1e-9);

  x = -0.5;
  D = Chebyshev1Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(-1, (D * c)(0), 1e-9);

  x = 0.3;
  D = Chebyshev1Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(5.4, (D * c)(0), 1e-9);
}
//******************************************************************************
// Fixed coefficient vector used by the numerical-derivative proxy below.
Vector3 f(-6, 1, 0.5);

// Evaluate the first-kind expansion with coefficients `f` at x; wrapped as a
// free function so it can be handed to numericalDerivative21.
double proxy1(double x, size_t N) {
  return Chebyshev1Basis::EvaluationFunctor(N, x)(Vector(f));
}

// Compare the analytic derivative weights against a numerical derivative of
// proxy1 at x = 0.5.
TEST(Chebyshev1, Derivative2) {
  const double x = 0.5;
  auto D = Chebyshev1Basis::DerivativeWeights(N, x);

  Matrix numeric_dTdx =
      numericalDerivative21<double, double, double>(proxy1, x, N);
  // regression
  EXPECT_DOUBLES_EQUAL(2, numeric_dTdx(0, 0), 1e-9);
  EXPECT_DOUBLES_EQUAL(2, (D * f)(0), 1e-9);
}
//******************************************************************************
// Second-kind DerivativeWeights: throws std::runtime_error at the interval
// endpoints x = -1 and x = 1, and matches regression values inside the
// interval and on a custom interval [0, 20].
TEST(Chebyshev2, Derivative) {
  Vector c(N);
  c << 12, 6, 2;

  Weights D;

  double x = -1.0;
  CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);
  x = 1.0;
  CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);

  x = -0.5;
  D = Chebyshev2Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(4, (D * c)(0), 1e-9);

  x = 0.3;
  D = Chebyshev2Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(16.8, (D * c)(0), 1e-9);

  x = 0.75;
  D = Chebyshev2Basis::DerivativeWeights(N, x);
  // regression
  EXPECT_DOUBLES_EQUAL(24, (D * c)(0), 1e-9);

  x = 10;
  D = Chebyshev2Basis::DerivativeWeights(N, x, 0, 20);
  // regression
  EXPECT_DOUBLES_EQUAL(12, (D * c)(0), 1e-9);
}
//******************************************************************************
// Second-kind analogue of proxy1: evaluate the expansion with coefficients
// `f` at x, for use with numericalDerivative21.
double proxy2(double x, size_t N) {
  return Chebyshev2Basis::EvaluationFunctor(N, x)(Vector(f));
}

// Compare analytic derivative weights against a numerical derivative of
// proxy2 at x = 0.5.
TEST(Chebyshev2, Derivative2) {
  const double x = 0.5;
  auto D = Chebyshev2Basis::DerivativeWeights(N, x);

  Matrix numeric_dTdx =
      numericalDerivative21<double, double, double>(proxy2, x, N);
  // regression
  EXPECT_DOUBLES_EQUAL(4, numeric_dTdx(0, 0), 1e-9);
  EXPECT_DOUBLES_EQUAL(4, (D * f)(0), 1e-9);
}
//******************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
//******************************************************************************

View File

@ -0,0 +1,435 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
 * @file testChebyshev2.cpp
* @date July 4, 2020
* @author Varun Agrawal
* @brief Unit tests for Chebyshev Basis Decompositions via pseudo-spectral
* methods
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/nonlinear/factorTesting.h>
using namespace std;
using namespace gtsam;
using namespace boost::placeholders;
// Unit noise model and default number of Chebyshev points used by the tests.
noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);
const size_t N = 32;

//******************************************************************************
// Chebyshev points for N=5 on the default interval [-1, 1], plus a symmetry
// check: points mirror around the midpoint.
TEST(Chebyshev2, Point) {
  static const int N = 5;
  auto points = Chebyshev2::Points(N);
  Vector expected(N);
  expected << -1., -sqrt(2.) / 2., 0., sqrt(2.) / 2., 1.;
  static const double tol = 1e-15;  // changing this reveals errors
  EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
  EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
  EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
  EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
  EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);

  // Check symmetry
  EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 0), -Chebyshev2::Point(N, 4), tol);
  EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 1), -Chebyshev2::Point(N, 3), tol);
}

//******************************************************************************
// Same five points mapped to the interval [0, 20].
TEST(Chebyshev2, PointInInterval) {
  static const int N = 5;
  auto points = Chebyshev2::Points(N, 0, 20);
  Vector expected(N);
  expected << 0., 1. - sqrt(2.) / 2., 1., 1. + sqrt(2.) / 2., 2.;
  expected *= 10.0;
  static const double tol = 1e-15;  // changing this reveals errors
  EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
  EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
  EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
  EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
  EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);

  // all at once
  Vector actual = Chebyshev2::Points(N, 0, 20);
  CHECK(assert_equal(expected, actual));
}
//******************************************************************************
// 3-point interpolation; expected value from Mathematica:
// InterpolatingPolynomial[{{-1, 4}, {0, 2}, {1, 6}}, 0.5]
TEST(Chebyshev2, Interpolate2) {
  size_t N = 3;
  Chebyshev2::EvaluationFunctor fx(N, 0.5);
  Vector f(N);
  f << 4, 2, 6;
  EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
}

//******************************************************************************
// Same interpolation on the custom interval [0, 2]:
// InterpolatingPolynomial[{{0, 4}, {1, 2}, {2, 6}}, 1.5]
TEST(Chebyshev2, Interpolate2_Interval) {
  Chebyshev2::EvaluationFunctor fx(3, 1.5, 0, 2);
  Vector3 f(4, 2, 6);
  EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
}

//******************************************************************************
// 5-point interpolation:
// InterpolatingPolynomial[{{-1, 4}, {-Sqrt[2]/2, 2}, {0, 6}, {Sqrt[2]/2,3}, {1,
// 3}}, 0.5]
TEST(Chebyshev2, Interpolate5) {
  Chebyshev2::EvaluationFunctor fx(5, 0.5);
  Vector f(5);
  f << 4, 2, 6, 3, 3;
  EXPECT_DOUBLES_EQUAL(4.34283, fx(f), 1e-5);
}
//******************************************************************************
// Interpolating vectors: evaluate a 2-vector-valued function parameterized by
// a 2xN ParameterMatrix (row 0 a slope-1 ramp, row 1 left at zero) and verify
// both the value at t and the Jacobian against a numerical derivative.
TEST(Chebyshev2, InterpolateVector) {
  double t = 30, a = 0, b = 100;
  const size_t N = 3;
  // Create 2x3 matrix with Vectors at Chebyshev points
  ParameterMatrix<2> X(N);
  X.row(0) = Chebyshev2::Points(N, a, b);  // slope 1 ramp

  // Check value
  Vector expected(2);
  expected << t, 0;
  Eigen::Matrix<double, /*2x2N*/ -1, -1> actualH(2, 2 * N);

  Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b);
  EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));

  // Check derivative
  boost::function<Vector2(ParameterMatrix<2>)> f = boost::bind(
      &Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, boost::none);
  Matrix numericalH =
      numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
  EXPECT(assert_equal(numericalH, actualH, 1e-9));
}
//******************************************************************************
// Fit y = x over 16 samples with a 3-parameter second-kind-point basis via
// FitBasis; the recovered parameters are the function values at the points.
TEST(Chebyshev2, Decomposition) {
  // Create example sequence
  Sequence sequence;
  for (size_t i = 0; i < 16; i++) {
    double x = (double)i / 16. - 0.99, y = x;
    sequence[x] = y;
  }

  // Do Chebyshev Decomposition
  FitBasis<Chebyshev2> actual(sequence, model, 3);

  // Check
  Vector expected(3);
  expected << -1, 0, 1;
  EXPECT(assert_equal(expected, actual.parameters(), 1e-4));
}
//******************************************************************************
// Differentiation matrix for N=3 against values from Trefethen00book, p.55
// (negated because the point ordering here is phase-shifted wrt Trefethen).
TEST(Chebyshev2, DifferentiationMatrix3) {
  // Trefethen00book, p.55
  const size_t N = 3;
  Matrix expected(N, N);
  // Differentiation matrix computed from Chebfun
  expected << 1.5000, -2.0000, 0.5000,  //
      0.5000, -0.0000, -0.5000,         //
      -0.5000, 2.0000, -1.5000;
  // multiply by -1 since the cheb points have a phase shift wrt Trefethen
  // This was verified with chebfun
  expected = -expected;

  Matrix actual = Chebyshev2::DifferentiationMatrix(N);
  EXPECT(assert_equal(expected, actual, 1e-4));
}

//******************************************************************************
// Same check for N=6.
TEST(Chebyshev2, DerivativeMatrix6) {
  // Trefethen00book, p.55
  const size_t N = 6;
  Matrix expected(N, N);
  expected << 8.5000, -10.4721, 2.8944, -1.5279, 1.1056, -0.5000,  //
      2.6180, -1.1708, -2.0000, 0.8944, -0.6180, 0.2764,           //
      -0.7236, 2.0000, -0.1708, -1.6180, 0.8944, -0.3820,          //
      0.3820, -0.8944, 1.6180, 0.1708, -2.0000, 0.7236,            //
      -0.2764, 0.6180, -0.8944, 2.0000, 1.1708, -2.6180,           //
      0.5000, -1.1056, 1.5279, -2.8944, 10.4721, -8.5000;
  // multiply by -1 since the cheb points have a phase shift wrt Trefethen
  // This was verified with chebfun
  expected = -expected;

  Matrix actual = Chebyshev2::DifferentiationMatrix(N);
  EXPECT(assert_equal((Matrix)expected, actual, 1e-4));
}
// Test function for CalculateWeights and DerivativeWeights:
// the cubic f(x) = 3x^3 - 2x^2 + 5x - 11.
double f(double x) {
  return 3.0 * x * x * x - 2.0 * x * x + 5.0 * x - 11.0;
}

// Its analytic derivative: f'(x) = 9x^2 - 4x + 5.
double fprime(double x) {
  return 9.0 * x * x - 4.0 * x + 5.0;
}
//******************************************************************************
// CalculateWeights must reproduce f at arbitrary points when applied to
// samples of f taken at the Chebyshev points.
TEST(Chebyshev2, CalculateWeights) {
  Eigen::Matrix<double, -1, 1> fvals(N);
  for (size_t i = 0; i < N; i++) {
    fvals(i) = f(Chebyshev2::Point(N, i));
  }
  double x1 = 0.7, x2 = -0.376;
  Weights weights1 = Chebyshev2::CalculateWeights(N, x1);
  Weights weights2 = Chebyshev2::CalculateWeights(N, x2);
  EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);
  EXPECT_DOUBLES_EQUAL(f(x2), weights2 * fvals, 1e-8);
}

// Same check on the custom interval [0, 10].
TEST(Chebyshev2, CalculateWeights2) {
  double a = 0, b = 10, x1 = 7, x2 = 4.12;

  Eigen::Matrix<double, -1, 1> fvals(N);
  for (size_t i = 0; i < N; i++) {
    fvals(i) = f(Chebyshev2::Point(N, i, a, b));
  }

  Weights weights1 = Chebyshev2::CalculateWeights(N, x1, a, b);
  EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);

  Weights weights2 = Chebyshev2::CalculateWeights(N, x2, a, b);
  double expected2 = f(x2);  // 185.454784
  double actual2 = weights2 * fvals;
  EXPECT_DOUBLES_EQUAL(expected2, actual2, 1e-8);
}
// DerivativeWeights must reproduce f' at arbitrary points (including an
// actual Chebyshev point) from samples of f at the Chebyshev points.
TEST(Chebyshev2, DerivativeWeights) {
  Eigen::Matrix<double, -1, 1> fvals(N);
  for (size_t i = 0; i < N; i++) {
    fvals(i) = f(Chebyshev2::Point(N, i));
  }
  double x1 = 0.7, x2 = -0.376, x3 = 0.0;
  Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1);
  EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-9);

  Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2);
  EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-9);

  Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3);
  EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-9);

  // test if derivative calculation and cheb point is correct
  double x4 = Chebyshev2::Point(N, 3);
  Weights dWeights4 = Chebyshev2::DerivativeWeights(N, x4);
  EXPECT_DOUBLES_EQUAL(fprime(x4), dWeights4 * fvals, 1e-9);
}

// Same check on the custom interval [0, 10].
TEST(Chebyshev2, DerivativeWeights2) {
  double x1 = 5, x2 = 4.12, a = 0, b = 10;

  Eigen::Matrix<double, -1, 1> fvals(N);
  for (size_t i = 0; i < N; i++) {
    fvals(i) = f(Chebyshev2::Point(N, i, a, b));
  }

  Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-8);

  Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-8);

  // test if derivative calculation and cheb point is correct
  double x3 = Chebyshev2::Point(N, 3, a, b);
  Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-8);
}
//******************************************************************************
// Check two different ways to calculate the derivative weights:
// direct DerivativeWeights vs. CalculateWeights * DifferentiationMatrix,
// on both the default and a custom interval.
TEST(Chebyshev2, DerivativeWeightsDifferentiationMatrix) {
  const size_t N6 = 6;
  double x1 = 0.311;
  Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
  Weights expected = Chebyshev2::CalculateWeights(N6, x1) * D6;
  Weights actual = Chebyshev2::DerivativeWeights(N6, x1);
  EXPECT(assert_equal(expected, actual, 1e-12));

  double a = -3, b = 8, x2 = 5.05;
  Matrix D6_2 = Chebyshev2::DifferentiationMatrix(N6, a, b);
  Weights expected1 = Chebyshev2::CalculateWeights(N6, x2, a, b) * D6_2;
  Weights actual1 = Chebyshev2::DerivativeWeights(N6, x2, a, b);
  EXPECT(assert_equal(expected1, actual1, 1e-12));
}
//******************************************************************************
// Differentiating a slope-1 ramp (values = the Chebyshev points themselves)
// must give derivative 1 at every point, for even N=6.
TEST(Chebyshev2, DerivativeWeights6) {
  const size_t N6 = 6;
  Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
  Chebyshev2::Parameters x = Chebyshev2::Points(N6);  // ramp with slope 1
  EXPECT(assert_equal(Vector::Ones(N6), Vector(D6 * x)));
}

//******************************************************************************
// Same ramp check for odd N=7.
TEST(Chebyshev2, DerivativeWeights7) {
  const size_t N7 = 7;
  Matrix D7 = Chebyshev2::DifferentiationMatrix(N7);
  Chebyshev2::Parameters x = Chebyshev2::Points(N7);  // ramp with slope 1
  EXPECT(assert_equal(Vector::Ones(N7), Vector(D7 * x)));
}
//******************************************************************************
// Check derivative in two different ways: numerical and using D on f
Vector6 f3_at_6points = (Vector6() << 4, 2, 6, 2, 4, 3).finished();

// Evaluate the 6-point interpolant of f3_at_6points at x (free function so
// it can be passed to numericalDerivative11).
double proxy3(double x) {
  return Chebyshev2::EvaluationFunctor(6, x)(f3_at_6points);
}

// Derivative at x = 0.2: interpolation of D*f values and DerivativeFunctor,
// both compared with a numerical derivative of proxy3.
TEST(Chebyshev2, Derivative6) {
  // Check Derivative evaluation at point x=0.2

  // calculate expected values by numerical derivative of synthesis
  const double x = 0.2;
  Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy3, x);

  // Calculate derivatives at Chebyshev points using D3, interpolate
  Matrix D6 = Chebyshev2::DifferentiationMatrix(6);
  Vector derivative_at_points = D6 * f3_at_6points;
  Chebyshev2::EvaluationFunctor fx(6, x);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);

  // Do directly
  Chebyshev2::DerivativeFunctor dfdx(6, x);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
}

//******************************************************************************
// Assert that derivative also works in non-standard interval [0,3]
double proxy4(double x) {
  return Chebyshev2::EvaluationFunctor(6, x, 0, 3)(f3_at_6points);
}

TEST(Chebyshev2, Derivative6_03) {
  // Check Derivative evaluation at point x=0.2, in interval [0,3]

  // calculate expected values by numerical derivative of synthesis
  const double x = 0.2;
  Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy4, x);

  // Calculate derivatives at Chebyshev points using D3, interpolate
  Matrix D6 = Chebyshev2::DifferentiationMatrix(6, 0, 3);
  Vector derivative_at_points = D6 * f3_at_6points;
  Chebyshev2::EvaluationFunctor fx(6, x, 0, 3);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);

  // Do directly
  Chebyshev2::DerivativeFunctor dfdx(6, x, 0, 3);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
}
//******************************************************************************
// Test VectorDerivativeFunctor: zero parameters give a zero derivative, and
// the Jacobian matches a numerical derivative.
TEST(Chebyshev2, VectorDerivativeFunctor) {
  const size_t N = 3, M = 2;
  const double x = 0.2;
  using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
  VecD fx(N, x, 0, 3);
  ParameterMatrix<M> X(N);
  Matrix actualH(M, M * N);
  EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8));

  // Test Jacobian
  Matrix expectedH = numericalDerivative11<Vector2, ParameterMatrix<M>, M * N>(
      boost::bind(&VecD::operator(), fx, _1, boost::none), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-7));
}

//******************************************************************************
// Test VectorDerivativeFunctor with polynomial function: derivatives at all
// 64 Chebyshev points must match the analytic fprime, and the Jacobian at
// the first point must match a numerical derivative.
TEST(Chebyshev2, VectorDerivativeFunctor2) {
  const size_t N = 64, M = 1, T = 15;
  using VecD = Chebyshev2::VectorDerivativeFunctor<M>;

  const Vector points = Chebyshev2::Points(N, 0, T);

  // Assign the parameter matrix
  Vector values(N);
  for (size_t i = 0; i < N; ++i) {
    values(i) = f(points(i));
  }
  ParameterMatrix<M> X(values);

  // Evaluate the derivative at the chebyshev points using
  // VectorDerivativeFunctor.
  for (size_t i = 0; i < N; ++i) {
    VecD d(N, points(i), 0, T);
    Vector1 Dx = d(X);
    EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6);
  }

  // Test Jacobian at the first chebyshev point.
  Matrix actualH(M, M * N);
  VecD vecd(N, points(0), 0, T);
  vecd(X, actualH);
  Matrix expectedH = numericalDerivative11<Vector1, ParameterMatrix<M>, M * N>(
      boost::bind(&VecD::operator(), vecd, _1, boost::none), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-6));
}
//******************************************************************************
// Test ComponentDerivativeFunctor: the derivative of row 1 at x = 0.2 with
// zero parameters is zero, and the Jacobian matches a numerical derivative.
TEST(Chebyshev2, ComponentDerivativeFunctor) {
  const size_t N = 6, M = 2;
  const double x = 0.2;
  using CompFunc = Chebyshev2::ComponentDerivativeFunctor<M>;
  size_t row = 1;
  CompFunc fx(N, row, x, 0, 3);
  ParameterMatrix<M> X(N);
  Matrix actualH(1, M * N);
  EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8);

  Matrix expectedH = numericalDerivative11<double, ParameterMatrix<M>, M * N>(
      boost::bind(&CompFunc::operator(), fx, _1, boost::none), X);
  EXPECT(assert_equal(expectedH, actualH, 1e-7));
}
//******************************************************************************
// Integration weights for N=7 and N=8 (regression values).
TEST(Chebyshev2, IntegralWeights) {
  const size_t N7 = 7;
  Vector actual = Chebyshev2::IntegrationWeights(N7);
  Vector expected = (Vector(N7) << 0.0285714285714286, 0.253968253968254,
                     0.457142857142857, 0.520634920634921, 0.457142857142857,
                     0.253968253968254, 0.0285714285714286)
                        .finished();
  EXPECT(assert_equal(expected, actual));

  const size_t N8 = 8;
  Vector actual2 = Chebyshev2::IntegrationWeights(N8);
  Vector expected2 = (Vector(N8) << 0.0204081632653061, 0.190141007218208,
                      0.352242423718159, 0.437208405798326, 0.437208405798326,
                      0.352242423718159, 0.190141007218208, 0.0204081632653061)
                         .finished();
  EXPECT(assert_equal(expected2, actual2));
}
//******************************************************************************
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
//******************************************************************************

View File

@ -0,0 +1,254 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testFourier.cpp
* @date July 4, 2020
* @author Frank Dellaert, Varun Agrawal
* @brief Unit tests for Fourier Basis Decompositions with Expressions
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/basis/Fourier.h>
#include <gtsam/nonlinear/factorTesting.h>
using namespace std;
using namespace gtsam;
// Unit noise model shared by all the tests below.
auto model = noiseModel::Unit::Create(1);

// Coefficients for testing, respectively 3 and 7 parameter Fourier basis.
// They correspond to best approximation of TestFunction(x)
const Vector k3Coefficients = (Vector3() << 1.5661, 1.2717, 1.2717).finished();
const Vector7 k7Coefficients =
    (Vector7() << 1.5661, 1.2717, 1.2717, -0.0000, 0.5887, -0.0943, 0.0943)
        .finished();

// The test-function used below
static double TestFunction(double x) { return exp(sin(x) + cos(x)); }
//******************************************************************************
// At x = 0 the evaluation reduces to c0 + c1 (the expected value below sums
// the first two coefficients).
TEST(Basis, BasisEvaluationFunctor) {
  // fx(0) takes coefficients c to calculate the value f(c;x==0)
  FourierBasis::EvaluationFunctor fx(3, 0);
  EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
                       fx(k3Coefficients), 1e-9);
}

//******************************************************************************
// Same evaluation, additionally checking the Jacobian w.r.t. coefficients.
TEST(Basis, BasisEvaluationFunctorDerivative) {
  // If we give the H argument, we get the derivative of fx(0) wrpt coefficients
  // Needs to be Matrix so it can be used by OptionalJacobian.
  Matrix H(1, 3);
  FourierBasis::EvaluationFunctor fx(3, 0);
  EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
                       fx(k3Coefficients, H), 1e-9);

  Matrix13 expectedH(1, 1, 0);
  EXPECT(assert_equal(expectedH, H));
}
//******************************************************************************
// Build the same linear problem twice — once with hand-written
// JacobianFactors and once via EvaluationFactor — and check that both the
// linearized factors and the solved coefficients agree.
TEST(Basis, Manual) {
  const size_t N = 3;

  // We will create a linear factor graph
  GaussianFactorGraph graph;

  // We create an unknown Vector expression for the coefficients
  Key key(1);

  // We will need values below to test the Jacobians
  Values values;
  values.insert<Vector>(key, Vector::Zero(N));  // value does not really matter

  // At 16 different samples points x, check Predict_ expression
  for (size_t i = 0; i < 16; i++) {
    const double x = i * M_PI / 8;
    const double desiredValue = TestFunction(x);

    // Manual JacobianFactor
    Matrix A(1, N);
    A << 1, cos(x), sin(x);
    Vector b(1);
    b << desiredValue;
    JacobianFactor linearFactor(key, A, b);
    graph.add(linearFactor);

    // Create factor to predict value at x
    EvaluationFactor<FourierBasis> predictFactor(key, desiredValue, model, N,
                                                 x);

    // Check expression Jacobians
    EXPECT_CORRECT_FACTOR_JACOBIANS(predictFactor, values, 1e-5, 1e-9);

    auto linearizedFactor = predictFactor.linearize(values);
    auto linearizedJacobianFactor =
        boost::dynamic_pointer_cast<JacobianFactor>(linearizedFactor);
    CHECK(linearizedJacobianFactor);  // makes sure it's indeed a JacobianFactor
    EXPECT(assert_equal(linearFactor, *linearizedJacobianFactor, 1e-9));
  }

  // Solve linear graph
  VectorValues actual = graph.optimize();
  EXPECT(assert_equal((Vector)k3Coefficients, actual.at(key), 1e-4));
}
//******************************************************************************
// Check fitting a function with a 7-parameter Fourier basis: linearize 16
// EvaluationFactors and verify the optimized coefficients.
TEST(Basis, EvaluationFactor) {
  // Create linear factor graph
  NonlinearFactorGraph graph;
  Key key(1);
  for (size_t i = 0; i < 16; i++) {
    double x = i * M_PI / 8, desiredValue = TestFunction(x);
    graph.emplace_shared<EvaluationFactor<FourierBasis>>(key, desiredValue,
                                                         model, 7, x);
  }

  // Solve FourierFactorGraph
  Values values;
  values.insert<Vector>(key, Vector::Zero(7));
  GaussianFactorGraph::shared_ptr lfg = graph.linearize(values);
  VectorValues actual = lfg->optimize();

  EXPECT(assert_equal((Vector)k7Coefficients, actual.at(key), 1e-4));
}
//******************************************************************************
// The WeightMatrix creates an m*n matrix, where m is the number of sample
// points, and n is the number of parameters; each row is the Fourier basis
// [1, cos(x), sin(x)] at one sample point.
TEST(Basis, WeightMatrix) {
  Matrix expected(2, 3);
  expected.row(0) << 1, cos(1), sin(1);
  expected.row(1) << 1, cos(2), sin(2);
  Vector2 X(1, 2);
  Matrix actual = FourierBasis::WeightMatrix(3, X);
  EXPECT(assert_equal(expected, actual, 1e-9));
}
//******************************************************************************
// Fit a 3-parameter Fourier series to 16 samples of TestFunction via FitBasis
// and compare against the known best-approximation coefficients.
TEST(Basis, Decomposition) {
  // Create example sequence
  Sequence sequence;
  for (size_t i = 0; i < 16; i++) {
    double x = i * M_PI / 8, y = TestFunction(x);
    sequence[x] = y;
  }

  // Do Fourier Decomposition
  FitBasis<FourierBasis> actual(sequence, model, 3);

  // Check
  EXPECT(assert_equal((Vector)k3Coefficients, actual.parameters(), 1e-4));
}
//******************************************************************************
// Check derivative in two different ways: numerical and using D on f
double proxy(double x) {
return FourierBasis::EvaluationFunctor(7, x)(k7Coefficients);
}
TEST(Basis, Derivative7) {
  // Compare the Fourier derivative at x = 0.2 against a numerical derivative
  // of the proxy function, computed two different ways.
  const double x = 0.2;
  const Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy, x);
  // Way 1: map the coefficients through the differentiation matrix D7, then
  // evaluate the resulting "derivative coefficients" at x.
  const Matrix D7 = FourierBasis::DifferentiationMatrix(7);
  const Vector derivativeCoefficients = D7 * k7Coefficients;
  FourierBasis::EvaluationFunctor fx(7, x);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivativeCoefficients), 1e-8);
  // Way 2: apply the DerivativeFunctor directly to the coefficients.
  FourierBasis::DerivativeFunctor dfdx(7, x);
  EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(k7Coefficients), 1e-8);
}
//******************************************************************************
TEST(Basis, VecDerivativeFunctor) {
  using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>;
  const size_t N = 3;
  // Reference values come from a MATLAB run (Dec 27 2019, commit 014eded5).
  const double h = 2 * M_PI / 16;
  const Vector2 dotShape(0.5556, -0.8315);  // expected value at h/2
  DotShape dotShapeFunction(N, h / 2);
  // 2x3 parameter matrix, written as the transpose of a 3x2 matrix.
  Matrix32 theta_tmp;
  theta_tmp << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071;
  const Matrix23 theta_mat = theta_tmp.transpose();
  ParameterMatrix<2> theta(theta_mat);
  EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4));
}
//******************************************************************************
// Suppose we want to parameterize a periodic function with function values at
// specific times, rather than coefficients. Can we do it? This would be a
// generalization of the Fourier transform, and constitute a "pseudo-spectral"
// parameterization. One way to do this is to establish hard constraints that
// express the relationship between the new parameters and the coefficients.
// For example, below I'd like the parameters to be the function values at
// X = {0.1,0.2,0.3}, rather than a 3-vector of coefficients.
// Because the values f(X) at these points can be written as f(X) = W(X)*c,
// we can simply express the coefficients c as c=inv(W(X))*f, which is a
// generalized Fourier transform. That also means we can create factors with the
// unknown f-values, as done manually below.
TEST(Basis, PseudoSpectral) {
  // We will create a linear factor graph over the f-values.
  GaussianFactorGraph graph;
  const size_t N = 3;
  const Key key(1);
  // The correct values at X = {0.1,0.2,0.3} are simply W*c.
  const Vector X = (Vector3() << 0.1, 0.2, 0.3).finished();
  const Matrix W = FourierBasis::WeightMatrix(N, X);
  const Vector expected = W * k3Coefficients;
  // Sanity check: these match direct Fourier evaluation at each point of X.
  using Eval = FourierBasis::EvaluationFunctor;
  for (int i = 0; i < 3; i++) {
    EXPECT_DOUBLES_EQUAL(Eval(N, X(i))(k3Coefficients), expected(i), 1e-9);
  }
  // Calculate the "inverse Fourier transform" matrix.
  const Matrix invW = W.inverse();
  // At 16 different sample points x, add a manual JacobianFactor on the
  // f-values: the basis row times invW maps f-values to a prediction at x.
  for (size_t i = 0; i < 16; i++) {
    const double x = i * M_PI / 8;
    Matrix A(1, 3);
    A << 1, cos(x), sin(x);
    Vector b(1);
    b << TestFunction(x);
    graph.add(JacobianFactor(key, A * invW, b));
  }
  // Solve the linear graph; the solution should be the f-values W*c.
  VectorValues actual = graph.optimize();
  EXPECT(assert_equal((Vector)expected, actual.at(key), 1e-4));
}
//******************************************************************************
// Run all unit tests registered via the TEST macros above.
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
//******************************************************************************

View File

@ -0,0 +1,145 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testParameterMatrix.cpp
* @date Sep 22, 2020
* @author Varun Agrawal, Frank Dellaert
* @brief Unit tests for ParameterMatrix.h
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/ParameterMatrix.h>
#include <gtsam/inference/Symbol.h>
using namespace std;
using namespace gtsam;
using Parameters = Chebyshev2::Parameters;
const size_t M = 2, N = 5;
//******************************************************************************
TEST(ParameterMatrix, Constructor) {
  // A size-only constructor yields an all-zero 2x3 parameter matrix.
  ParameterMatrix<2> fromSize(3);
  ParameterMatrix<2> zeros(Matrix::Zero(2, 3));
  EXPECT(assert_equal(zeros, fromSize));
  // A matrix constructor stores the given matrix verbatim.
  const Matrix onesMat = Matrix::Ones(2, 3);
  ParameterMatrix<2> fromMatrix(onesMat);
  ParameterMatrix<2> ones(onesMat);
  EXPECT(assert_equal(ones, fromMatrix));
  EXPECT(assert_equal(ones.matrix(), fromMatrix.matrix()));
}
//******************************************************************************
TEST(ParameterMatrix, Dimensions) {
  // rows/cols/dim must reflect the M x N sizes (dim is the flattened size).
  ParameterMatrix<M> p(N);
  EXPECT_LONGS_EQUAL(p.rows(), M);
  EXPECT_LONGS_EQUAL(p.cols(), N);
  EXPECT_LONGS_EQUAL(p.dim(), M * N);
}
//******************************************************************************
// Check the read accessors: matrix(), transpose(), row(), and col().
TEST(ParameterMatrix, Getters) {
  // A size-constructed parameter matrix is all zeros.
  ParameterMatrix<M> params(N);
  Matrix expectedMatrix = Matrix::Zero(2, 5);
  EXPECT(assert_equal(expectedMatrix, params.matrix()));
  Matrix expectedMatrixTranspose = Matrix::Zero(5, 2);
  EXPECT(assert_equal(expectedMatrixTranspose, params.transpose()));
  // Each of the M rows of an all-ones matrix is a ones vector of length N.
  ParameterMatrix<M> p2(Matrix::Ones(M, N));
  Vector expectedRowVector = Vector::Ones(N);
  for (size_t r = 0; r < M; ++r) {
    EXPECT(assert_equal(p2.row(r), expectedRowVector));
  }
  // Each of the N columns of an all-twos matrix is a twos vector of length M.
  // NOTE: loop bound fixed from M to N — the matrix has N columns, so the
  // previous bound only exercised the first M (= 2 of 5) columns.
  ParameterMatrix<M> p3(2 * Matrix::Ones(M, N));
  Vector expectedColVector = 2 * Vector::Ones(M);
  for (size_t c = 0; c < N; ++c) {
    EXPECT(assert_equal(p3.col(c), expectedColVector));
  }
}
//******************************************************************************
// Check that row(), col(), and setZero() give mutable access to the matrix.
TEST(ParameterMatrix, Setters) {
  ParameterMatrix<M> params(Matrix::Zero(M, N));
  Matrix expected = Matrix::Zero(M, N);
  // row(0) returns a writable view: assigning to it mutates the stored matrix.
  params.row(0) = Vector::Ones(N);
  expected.row(0) = Vector::Ones(N);
  EXPECT(assert_equal(expected, params.matrix()));
  // col(2) likewise returns a writable column view.
  params.col(2) = Vector::Ones(M);
  expected.col(2) = Vector::Ones(M);
  EXPECT(assert_equal(expected, params.matrix()));
  // setZero() resets every entry to zero.
  params.setZero();
  expected.setZero();
  EXPECT(assert_equal(expected, params.matrix()));
}
//******************************************************************************
TEST(ParameterMatrix, Addition) {
  ParameterMatrix<M> ones(Matrix::Ones(M, N));
  ParameterMatrix<M> twos(2 * Matrix::Ones(M, N));
  // Adding a flattened M*N vector adds elementwise after reshaping.
  EXPECT(assert_equal(twos, ones + Vector::Ones(M * N)));
  // Adding another ParameterMatrix is elementwise as well.
  ParameterMatrix<M> sum = ones + ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(twos, sum));
}
//******************************************************************************
TEST(ParameterMatrix, Subtraction) {
  ParameterMatrix<M> twos(2 * Matrix::Ones(M, N));
  ParameterMatrix<M> ones(Matrix::Ones(M, N));
  // Subtracting a flattened M*N vector subtracts elementwise after reshaping.
  EXPECT(assert_equal(ones, twos - Vector::Ones(M * N)));
  // Subtracting another ParameterMatrix is elementwise as well.
  ParameterMatrix<M> diff = twos - ParameterMatrix<M>(Matrix::Ones(M, N));
  EXPECT(assert_equal(ones, diff));
}
//******************************************************************************
TEST(ParameterMatrix, Multiplication) {
  // (M x N ones) * (N x 2 twos) gives an M x 2 matrix with every entry 2*N.
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  const Matrix rhs = 2 * Matrix::Ones(N, 2);
  const Matrix expected = 2 * N * Matrix::Ones(M, 2);
  EXPECT(assert_equal(expected, params * rhs));
}
//******************************************************************************
TEST(ParameterMatrix, VectorSpace) {
  ParameterMatrix<M> params(Matrix::Ones(M, N));
  // vector() flattens the M x N matrix into an M*N vector.
  const Vector flattened = Vector::Ones(M * N);
  EXPECT(assert_equal(flattened, params.vector()));
  // The vector-space identity is the empty (M x 0) parameter matrix.
  EXPECT(assert_equal(ParameterMatrix<M>::identity(),
                      ParameterMatrix<M>(Matrix::Zero(M, 0))));
}
//******************************************************************************
// Run all unit tests registered via the TEST macros above.
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
//******************************************************************************

View File

@ -77,3 +77,9 @@
// Support Metis-based nested dissection // Support Metis-based nested dissection
#cmakedefine GTSAM_TANGENT_PREINTEGRATION #cmakedefine GTSAM_TANGENT_PREINTEGRATION
// Whether to use the system installed Metis instead of the provided one
#cmakedefine GTSAM_USE_SYSTEM_METIS
// Toggle switch for BetweenFactor jacobian computation
#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR

View File

@ -147,113 +147,123 @@ public:
* G = F' * F - F' * E * P * E' * F * G = F' * F - F' * E * P * E' * F
* g = F' * (b - E * P * E' * b) * g = F' * (b - E * P * E' * b)
* Fixed size version * Fixed size version
*/ */
template<int N, int ND> // N = 2 or 3 (point dimension), ND is the camera dimension template <int N,
static SymmetricBlockMatrix SchurComplement( int ND> // N = 2 or 3 (point dimension), ND is the camera dimension
const std::vector< Eigen::Matrix<double, ZDim, ND>, Eigen::aligned_allocator< Eigen::Matrix<double, ZDim, ND> > >& Fs, static SymmetricBlockMatrix SchurComplement(
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b) { const std::vector<
Eigen::Matrix<double, ZDim, ND>,
Eigen::aligned_allocator<Eigen::Matrix<double, ZDim, ND>>>& Fs,
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b) {
// a single point is observed in m cameras
size_t m = Fs.size();
// a single point is observed in m cameras // Create a SymmetricBlockMatrix (augmented hessian, with extra row/column with info vector)
size_t m = Fs.size(); size_t M1 = ND * m + 1;
std::vector<DenseIndex> dims(m + 1); // this also includes the b term
std::fill(dims.begin(), dims.end() - 1, ND);
dims.back() = 1;
SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(M1, M1));
// Create a SymmetricBlockMatrix (augmented hessian, with extra row/column with info vector) // Blockwise Schur complement
size_t M1 = ND * m + 1; for (size_t i = 0; i < m; i++) { // for each camera
std::vector<DenseIndex> dims(m + 1); // this also includes the b term
std::fill(dims.begin(), dims.end() - 1, ND);
dims.back() = 1;
SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(M1, M1));
// Blockwise Schur complement const Eigen::Matrix<double, ZDim, ND>& Fi = Fs[i];
for (size_t i = 0; i < m; i++) { // for each camera const auto FiT = Fi.transpose();
const Eigen::Matrix<double, ZDim, N> Ei_P = //
E.block(ZDim * i, 0, ZDim, N) * P;
const Eigen::Matrix<double, ZDim, ND>& Fi = Fs[i]; // D = (Dx2) * ZDim
const auto FiT = Fi.transpose(); augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment<ZDim>(ZDim * i) // F' * b
const Eigen::Matrix<double, ZDim, N> Ei_P = // - FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
E.block(ZDim * i, 0, ZDim, N) * P;
// D = (Dx2) * ZDim // (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment<ZDim>(ZDim * i) // F' * b augmentedHessian.setDiagonalBlock(i, FiT
- FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1) * (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi));
// (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) ) // upper triangular part of the hessian
augmentedHessian.setDiagonalBlock(i, FiT for (size_t j = i + 1; j < m; j++) { // for each camera
* (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi)); const Eigen::Matrix<double, ZDim, ND>& Fj = Fs[j];
// upper triangular part of the hessian // (DxD) = (Dx2) * ( (2x2) * (2xD) )
for (size_t j = i + 1; j < m; j++) { // for each camera augmentedHessian.setOffDiagonalBlock(i, j, -FiT
const Eigen::Matrix<double, ZDim, ND>& Fj = Fs[j]; * (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj));
}
} // end of for over cameras
// (DxD) = (Dx2) * ( (2x2) * (2xD) ) augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm();
augmentedHessian.setOffDiagonalBlock(i, j, -FiT return augmentedHessian;
* (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj)); }
}
} // end of for over cameras
augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm();
return augmentedHessian;
}
/** /**
* Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix * Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix
* G = F' * F - F' * E * P * E' * F * G = F' * F - F' * E * P * E' * F
* g = F' * (b - E * P * E' * b) * g = F' * (b - E * P * E' * b)
* In this version, we allow for the case where the keys in the Jacobian are organized * In this version, we allow for the case where the keys in the Jacobian are
* differently from the keys in the output SymmetricBlockMatrix * organized differently from the keys in the output SymmetricBlockMatrix In
* In particular: each diagonal block of the Jacobian F captures 2 poses (useful for rolling shutter and extrinsic calibration) * particular: each diagonal block of the Jacobian F captures 2 poses (useful
* such that F keeps the block structure that makes the Schur complement trick fast. * for rolling shutter and extrinsic calibration) such that F keeps the block
* structure that makes the Schur complement trick fast.
*
* N = 2 or 3 (point dimension), ND is the Jacobian block dimension, NDD is
* the Hessian block dimension
*/ */
template<int N, int ND, int NDD> // N = 2 or 3 (point dimension), ND is the Jacobian block dimension, NDD is the Hessian block dimension template <int N, int ND, int NDD>
static SymmetricBlockMatrix SchurComplementAndRearrangeBlocks( static SymmetricBlockMatrix SchurComplementAndRearrangeBlocks(
const std::vector<Eigen::Matrix<double, ZDim, ND>, const std::vector<
Eigen::aligned_allocator<Eigen::Matrix<double, ZDim, ND> > >& Fs, Eigen::Matrix<double, ZDim, ND>,
Eigen::aligned_allocator<Eigen::Matrix<double, ZDim, ND>>>& Fs,
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b, const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b,
const KeyVector jacobianKeys, const KeyVector hessianKeys) { const KeyVector& jacobianKeys, const KeyVector& hessianKeys) {
size_t nrNonuniqueKeys = jacobianKeys.size(); size_t nrNonuniqueKeys = jacobianKeys.size();
size_t nrUniqueKeys = hessianKeys.size(); size_t nrUniqueKeys = hessianKeys.size();
// marginalize point: note - we reuse the standard SchurComplement function // Marginalize point: note - we reuse the standard SchurComplement function.
SymmetricBlockMatrix augmentedHessian = SchurComplement<N, ND>(Fs,E,P,b); SymmetricBlockMatrix augmentedHessian = SchurComplement<N, ND>(Fs, E, P, b);
// now pack into an Hessian factor // Pack into an Hessian factor, allow space for b term.
std::vector<DenseIndex> dims(nrUniqueKeys + 1); // this also includes the b term std::vector<DenseIndex> dims(nrUniqueKeys + 1);
std::fill(dims.begin(), dims.end() - 1, NDD); std::fill(dims.begin(), dims.end() - 1, NDD);
dims.back() = 1; dims.back() = 1;
SymmetricBlockMatrix augmentedHessianUniqueKeys; SymmetricBlockMatrix augmentedHessianUniqueKeys;
// here we have to deal with the fact that some blocks may share the same keys // Deal with the fact that some blocks may share the same keys.
if (nrUniqueKeys == nrNonuniqueKeys) { // if there is 1 calibration key per camera if (nrUniqueKeys == nrNonuniqueKeys) {
// Case when there is 1 calibration key per camera:
augmentedHessianUniqueKeys = SymmetricBlockMatrix( augmentedHessianUniqueKeys = SymmetricBlockMatrix(
dims, Matrix(augmentedHessian.selfadjointView())); dims, Matrix(augmentedHessian.selfadjointView()));
} else { // if multiple cameras share a calibration we have to rearrange } else {
// the results of the Schur complement matrix // When multiple cameras share a calibration we have to rearrange
std::vector<DenseIndex> nonuniqueDims(nrNonuniqueKeys + 1); // this also includes the b term // the results of the Schur complement matrix.
std::vector<DenseIndex> nonuniqueDims(nrNonuniqueKeys + 1); // includes b
std::fill(nonuniqueDims.begin(), nonuniqueDims.end() - 1, NDD); std::fill(nonuniqueDims.begin(), nonuniqueDims.end() - 1, NDD);
nonuniqueDims.back() = 1; nonuniqueDims.back() = 1;
augmentedHessian = SymmetricBlockMatrix( augmentedHessian = SymmetricBlockMatrix(
nonuniqueDims, Matrix(augmentedHessian.selfadjointView())); nonuniqueDims, Matrix(augmentedHessian.selfadjointView()));
// get map from key to location in the new augmented Hessian matrix (the one including only unique keys) // Get map from key to location in the new augmented Hessian matrix (the
// one including only unique keys).
std::map<Key, size_t> keyToSlotMap; std::map<Key, size_t> keyToSlotMap;
for (size_t k = 0; k < nrUniqueKeys; k++) { for (size_t k = 0; k < nrUniqueKeys; k++) {
keyToSlotMap[hessianKeys[k]] = k; keyToSlotMap[hessianKeys[k]] = k;
} }
// initialize matrix to zero // Initialize matrix to zero.
augmentedHessianUniqueKeys = SymmetricBlockMatrix( augmentedHessianUniqueKeys = SymmetricBlockMatrix(
dims, Matrix::Zero(NDD * nrUniqueKeys + 1, NDD * nrUniqueKeys + 1)); dims, Matrix::Zero(NDD * nrUniqueKeys + 1, NDD * nrUniqueKeys + 1));
// add contributions for each key: note this loops over the hessian with nonUnique keys (augmentedHessian) // Add contributions for each key: note this loops over the hessian with
// and populates an Hessian that only includes the unique keys (that is what we want to return) // nonUnique keys (augmentedHessian) and populates an Hessian that only
// includes the unique keys (that is what we want to return).
for (size_t i = 0; i < nrNonuniqueKeys; i++) { // rows for (size_t i = 0; i < nrNonuniqueKeys; i++) { // rows
Key key_i = jacobianKeys.at(i); Key key_i = jacobianKeys.at(i);
// update information vector // Update information vector.
augmentedHessianUniqueKeys.updateOffDiagonalBlock( augmentedHessianUniqueKeys.updateOffDiagonalBlock(
keyToSlotMap[key_i], nrUniqueKeys, keyToSlotMap[key_i], nrUniqueKeys,
augmentedHessian.aboveDiagonalBlock(i, nrNonuniqueKeys)); augmentedHessian.aboveDiagonalBlock(i, nrNonuniqueKeys));
// update blocks // Update blocks.
for (size_t j = i; j < nrNonuniqueKeys; j++) { // cols for (size_t j = i; j < nrNonuniqueKeys; j++) { // cols
Key key_j = jacobianKeys.at(j); Key key_j = jacobianKeys.at(j);
if (i == j) { if (i == j) {
@ -267,45 +277,20 @@ public:
} else { } else {
augmentedHessianUniqueKeys.updateDiagonalBlock( augmentedHessianUniqueKeys.updateDiagonalBlock(
keyToSlotMap[key_i], keyToSlotMap[key_i],
augmentedHessian.aboveDiagonalBlock(i, j) augmentedHessian.aboveDiagonalBlock(i, j) +
+ augmentedHessian.aboveDiagonalBlock(i, j).transpose()); augmentedHessian.aboveDiagonalBlock(i, j).transpose());
} }
} }
} }
} }
// update bottom right element of the matrix
// Update bottom right element of the matrix.
augmentedHessianUniqueKeys.updateDiagonalBlock( augmentedHessianUniqueKeys.updateDiagonalBlock(
nrUniqueKeys, augmentedHessian.diagonalBlock(nrNonuniqueKeys)); nrUniqueKeys, augmentedHessian.diagonalBlock(nrNonuniqueKeys));
} }
return augmentedHessianUniqueKeys; return augmentedHessianUniqueKeys;
} }
/**
* non-templated version of function above
*/
static SymmetricBlockMatrix SchurComplementAndRearrangeBlocks_3_12_6(
const std::vector<Eigen::Matrix<double,ZDim, 12>,
Eigen::aligned_allocator<Eigen::Matrix<double,ZDim,12> > >& Fs,
const Matrix& E, const Eigen::Matrix<double,3,3>& P, const Vector& b,
const KeyVector jacobianKeys, const KeyVector hessianKeys) {
return SchurComplementAndRearrangeBlocks<3,12,6>(Fs, E, P, b,
jacobianKeys,
hessianKeys);
}
/**
* non-templated version of function above
*/
static SymmetricBlockMatrix SchurComplementAndRearrangeBlocks_3_6_6(
const std::vector<Eigen::Matrix<double,ZDim, 6>,
Eigen::aligned_allocator<Eigen::Matrix<double,ZDim,6> > >& Fs,
const Matrix& E, const Eigen::Matrix<double,3,3>& P, const Vector& b,
const KeyVector jacobianKeys, const KeyVector hessianKeys) {
return SchurComplementAndRearrangeBlocks<3,6,6>(Fs, E, P, b,
jacobianKeys,
hessianKeys);
}
/** /**
* Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix * Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix
* G = F' * F - F' * E * P * E' * F * G = F' * F - F' * E * P * E' * F

View File

@ -25,6 +25,12 @@ namespace gtsam {
/// As of GTSAM 4, in order to make GTSAM more lean, /// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point2 to Vector2 /// it is now possible to just typedef Point2 to Vector2
typedef Vector2 Point2; typedef Vector2 Point2;
// Convenience typedef
using Point2Pair = std::pair<Point2, Point2>;
GTSAM_EXPORT std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);
using Point2Pairs = std::vector<Point2Pair>;
/// Distance of the point from the origin, with Jacobian /// Distance of the point from the origin, with Jacobian
GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none); GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none);
@ -34,10 +40,6 @@ GTSAM_EXPORT double distance2(const Point2& p1, const Point2& q,
OptionalJacobian<1, 2> H1 = boost::none, OptionalJacobian<1, 2> H1 = boost::none,
OptionalJacobian<1, 2> H2 = boost::none); OptionalJacobian<1, 2> H2 = boost::none);
// Convenience typedef
typedef std::pair<Point2, Point2> Point2Pair;
GTSAM_EXPORT std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);
// For MATLAB wrapper // For MATLAB wrapper
typedef std::vector<Point2, Eigen::aligned_allocator<Point2> > Point2Vector; typedef std::vector<Point2, Eigen::aligned_allocator<Point2> > Point2Vector;

View File

@ -63,6 +63,46 @@ Matrix6 Pose3::AdjointMap() const {
return adj; return adj;
} }
/* ************************************************************************* */
// Calculate AdjointMap applied to xi_b, with Jacobians
Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_xib) const {
const Matrix6 Ad = AdjointMap();
// Jacobians
// D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b
// D2 Ad_T(xi_b) = Ad_T
// See docs/math.pdf for more details.
// In D1 calculation, we could be more efficient by writing it out, but do not
// for readability
if (H_pose) *H_pose = -Ad * adjointMap(xi_b);
if (H_xib) *H_xib = Ad;
return Ad * xi_b;
}
/* ************************************************************************* */
/// The dual version of Adjoint
Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_x) const {
const Matrix6 Ad = AdjointMap();
const Vector6 AdTx = Ad.transpose() * x;
// Jacobians
// See docs/math.pdf for more details.
if (H_pose) {
const auto w_T_hat = skewSymmetric(AdTx.head<3>()),
v_T_hat = skewSymmetric(AdTx.tail<3>());
*H_pose << w_T_hat, v_T_hat, //
/* */ v_T_hat, Z_3x3;
}
if (H_x) {
*H_x = Ad.transpose();
}
return AdTx;
}
/* ************************************************************************* */ /* ************************************************************************* */
Matrix6 Pose3::adjointMap(const Vector6& xi) { Matrix6 Pose3::adjointMap(const Vector6& xi) {
Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2)); Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));

View File

@ -145,15 +145,22 @@ public:
* Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame * Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame
* Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi) * Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi)
*/ */
Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect Matrix6 AdjointMap() const;
/** /**
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame * Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a
* body-fixed velocity, transforming it to the spatial frame
* \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$ * \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$
* Note that H_xib = AdjointMap()
*/ */
Vector6 Adjoint(const Vector6& xi_b) const { Vector6 Adjoint(const Vector6& xi_b,
return AdjointMap() * xi_b; OptionalJacobian<6, 6> H_this = boost::none,
} /// FIXME Not tested - marked as incorrect OptionalJacobian<6, 6> H_xib = boost::none) const;
/// The dual version of Adjoint
Vector6 AdjointTranspose(const Vector6& x,
OptionalJacobian<6, 6> H_this = boost::none,
OptionalJacobian<6, 6> H_x = boost::none) const;
/** /**
* Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11 * Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11

View File

@ -261,25 +261,59 @@ Vector3 SO3::Logmap(const SO3& Q, ChartJacobian H) {
// when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc. // when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc.
// we do something special // we do something special
if (tr + 1.0 < 1e-10) { if (tr + 1.0 < 1e-3) {
if (std::abs(R33 + 1.0) > 1e-5) if (R33 > R22 && R33 > R11) {
omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Vector3(R13, R23, 1.0 + R33); // R33 is the largest diagonal, a=3, b=1, c=2
else if (std::abs(R22 + 1.0) > 1e-5) const double W = R21 - R12;
omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Vector3(R12, 1.0 + R22, R32); const double Q1 = 2.0 + 2.0 * R33;
else const double Q2 = R31 + R13;
// if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit const double Q3 = R23 + R32;
omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Vector3(1.0 + R11, R21, R31); const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q2, Q3, Q1);
} else if (R22 > R11) {
// R22 is the largest diagonal, a=2, b=3, c=1
const double W = R13 - R31;
const double Q1 = 2.0 + 2.0 * R22;
const double Q2 = R23 + R32;
const double Q3 = R12 + R21;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q3, Q1, Q2);
} else {
// R11 is the largest diagonal, a=1, b=2, c=3
const double W = R32 - R23;
const double Q1 = 2.0 + 2.0 * R11;
const double Q2 = R12 + R21;
const double Q3 = R31 + R13;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q1, Q2, Q3);
}
} else { } else {
double magnitude; double magnitude;
const double tr_3 = tr - 3.0; // always negative const double tr_3 = tr - 3.0; // could be non-negative if the matrix is off orthogonal
if (tr_3 < -1e-7) { if (tr_3 < -1e-6) {
// this is the normal case -1 < trace < 3
double theta = acos((tr - 1.0) / 2.0); double theta = acos((tr - 1.0) / 2.0);
magnitude = theta / (2.0 * sin(theta)); magnitude = theta / (2.0 * sin(theta));
} else { } else {
// when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0) // when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
// use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2) // use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
// see https://github.com/borglab/gtsam/issues/746 for details // see https://github.com/borglab/gtsam/issues/746 for details
magnitude = 0.5 - tr_3 / 12.0; magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0;
} }
omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12); omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12);
} }

View File

@ -31,6 +31,7 @@ namespace gtsam {
class GTSAM_EXPORT EmptyCal { class GTSAM_EXPORT EmptyCal {
public: public:
enum { dimension = 0 };
EmptyCal(){} EmptyCal(){}
virtual ~EmptyCal() = default; virtual ~EmptyCal() = default;
using shared_ptr = boost::shared_ptr<EmptyCal>; using shared_ptr = boost::shared_ptr<EmptyCal>;

View File

@ -31,6 +31,14 @@ class Point2 {
// enable pickling in python // enable pickling in python
void pickle() const; void pickle() const;
}; };
class Point2Pairs {
Point2Pairs();
size_t size() const;
bool empty() const;
gtsam::Point2Pair at(size_t n) const;
void push_back(const gtsam::Point2Pair& point_pair);
};
// std::vector<gtsam::Point2> // std::vector<gtsam::Point2>
class Point2Vector { class Point2Vector {
@ -429,6 +437,8 @@ class Pose2 {
// enable pickling in python // enable pickling in python
void pickle() const; void pickle() const;
}; };
boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);
#include <gtsam/geometry/Pose3.h> #include <gtsam/geometry/Pose3.h>
class Pose3 { class Pose3 {

View File

@ -185,9 +185,8 @@ TEST(CameraSet, SchurComplementAndRearrangeBlocks) {
// Actual // Actual
SymmetricBlockMatrix augmentedHessianBM = SymmetricBlockMatrix augmentedHessianBM =
Set::SchurComplementAndRearrangeBlocks_3_12_6(Fs, E, P, b, Set::SchurComplementAndRearrangeBlocks<3, 12, 6>(
nonuniqueKeys, Fs, E, P, b, nonuniqueKeys, uniqueKeys);
uniqueKeys);
Matrix actualAugmentedHessian = augmentedHessianBM.selfadjointView(); Matrix actualAugmentedHessian = augmentedHessianBM.selfadjointView();
// Expected // Expected

View File

@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full)
EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6)); EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6));
} }
/* ************************************************************************* */
// Check Adjoint numerical derivatives
TEST(Pose3, Adjoint_jacobians)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
// Check evaluation sanity check
EQUALITY(static_cast<gtsam::Vector>(T.AdjointMap() * xi), T.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T2.AdjointMap() * xi), T2.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T3.AdjointMap() * xi), T3.Adjoint(xi));
// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> Adjoint_proxy =
[&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); };
T.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
T2.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
T3.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
}
/* ************************************************************************* */
// Check AdjointTranspose and jacobians
TEST(Pose3, AdjointTranspose)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
// Check evaluation
EQUALITY(static_cast<Vector>(T.AdjointMap().transpose() * xi),
T.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T2.AdjointMap().transpose() * xi),
T2.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T3.AdjointMap().transpose() * xi),
T3.AdjointTranspose(xi));
// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> AdjointTranspose_proxy =
[&](const Pose3& T, const Vector6& xi) {
return T.AdjointTranspose(xi);
};
T.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
T2.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
T3.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
}
/* ************************************************************************* */ /* ************************************************************************* */
// assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi)) // assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi))
TEST(Pose3, Adjoint_hat) TEST(Pose3, Adjoint_hat)

View File

@ -122,6 +122,21 @@ TEST( Rot3, AxisAngle)
CHECK(assert_equal(expected,actual3,1e-5)); CHECK(assert_equal(expected,actual3,1e-5));
} }
/* ************************************************************************* */
TEST( Rot3, AxisAngle2)
{
// constructor from a rotation matrix, as doubles in *row-major* order.
Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781);
Unit3 actualAxis;
double actualAngle;
// convert Rot3 to quaternion using GTSAM
std::tie(actualAxis, actualAngle) = R1.axisAngle();
double expectedAngle = 3.1396582;
CHECK(assert_equal(expectedAngle, actualAngle, 1e-5));
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Rot3, Rodrigues) TEST( Rot3, Rodrigues)
{ {
@ -181,13 +196,13 @@ TEST( Rot3, retract)
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(Rot3, log) { TEST( Rot3, log) {
static const double PI = boost::math::constants::pi<double>(); static const double PI = boost::math::constants::pi<double>();
Vector w; Vector w;
Rot3 R; Rot3 R;
#define CHECK_OMEGA(X, Y, Z) \ #define CHECK_OMEGA(X, Y, Z) \
w = (Vector(3) << X, Y, Z).finished(); \ w = (Vector(3) << (X), (Y), (Z)).finished(); \
R = Rot3::Rodrigues(w); \ R = Rot3::Rodrigues(w); \
EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12)); EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12));
@ -219,17 +234,17 @@ TEST(Rot3, log) {
CHECK_OMEGA(0, 0, PI) CHECK_OMEGA(0, 0, PI)
// Windows and Linux have flipped sign in quaternion mode // Windows and Linux have flipped sign in quaternion mode
#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS) //#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
w = (Vector(3) << x * PI, y * PI, z * PI).finished(); w = (Vector(3) << x * PI, y * PI, z * PI).finished();
R = Rot3::Rodrigues(w); R = Rot3::Rodrigues(w);
EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12)); EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12));
#else //#else
CHECK_OMEGA(x * PI, y * PI, z * PI) // CHECK_OMEGA(x * PI, y * PI, z * PI)
#endif //#endif
// Check 360 degree rotations // Check 360 degree rotations
#define CHECK_OMEGA_ZERO(X, Y, Z) \ #define CHECK_OMEGA_ZERO(X, Y, Z) \
w = (Vector(3) << X, Y, Z).finished(); \ w = (Vector(3) << (X), (Y), (Z)).finished(); \
R = Rot3::Rodrigues(w); \ R = Rot3::Rodrigues(w); \
EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R))); EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R)));
@ -247,15 +262,15 @@ TEST(Rot3, log) {
// Rot3's Logmap returns different, but equivalent compacted // Rot3's Logmap returns different, but equivalent compacted
// axis-angle vectors depending on whether Rot3 is implemented // axis-angle vectors depending on whether Rot3 is implemented
// by Quaternions or SO3. // by Quaternions or SO3.
#if defined(GTSAM_USE_QUATERNIONS) #if defined(GTSAM_USE_QUATERNIONS)
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees // Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211), EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
(Vector)Rot3::Logmap(Rlund), 1e-8));
#else
// SO3 will be approximate because of the non-orthogonality
EXPECT(assert_equal(Vector3(0.264452, -0.742197708, -3.04098184),
(Vector)Rot3::Logmap(Rlund), 1e-8)); (Vector)Rot3::Logmap(Rlund), 1e-8));
#else #endif
// SO3 does not bound angle resulting in ~180.1 degrees
EXPECT(assert_equal(Vector3(-0.264544406, 0.742217405, 3.04117314),
(Vector)Rot3::Logmap(Rlund), 1e-8));
#endif
} }
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -70,16 +70,23 @@ namespace gtsam {
/// @name Standard Constructors /// @name Standard Constructors
/// @{ /// @{
/** Default constructor */ /// Default constructor
BayesTreeCliqueBase() : problemSize_(1) {} BayesTreeCliqueBase() : problemSize_(1) {}
/** Construct from a conditional, leaving parent and child pointers uninitialized */ /// Construct from a conditional, leaving parent and child pointers
BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {} /// uninitialized.
BayesTreeCliqueBase(const sharedConditional& conditional)
: conditional_(conditional), problemSize_(1) {}
/** Shallow copy constructor */ /// Shallow copy constructor.
BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {} BayesTreeCliqueBase(const BayesTreeCliqueBase& c)
: conditional_(c.conditional_),
parent_(c.parent_),
children(c.children),
problemSize_(c.problemSize_),
is_root(c.is_root) {}
/** Shallow copy assignment constructor */ /// Shallow copy assignment constructor
BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) { BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) {
conditional_ = c.conditional_; conditional_ = c.conditional_;
parent_ = c.parent_; parent_ = c.parent_;
@ -89,6 +96,9 @@ namespace gtsam {
return *this; return *this;
} }
// Virtual destructor.
virtual ~BayesTreeCliqueBase() {}
/// @} /// @}
/// This stores the Cached separator marginal P(S) /// This stores the Cached separator marginal P(S)
@ -119,7 +129,9 @@ namespace gtsam {
bool equals(const DERIVED& other, double tol = 1e-9) const; bool equals(const DERIVED& other, double tol = 1e-9) const;
/** print this node */ /** print this node */
virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; virtual void print(
const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
/// @} /// @}
/// @name Standard Interface /// @name Standard Interface

View File

@ -32,7 +32,7 @@
namespace gtsam { namespace gtsam {
/// Typedef for a function to format a key, i.e. to convert it to a string /// Typedef for a function to format a key, i.e. to convert it to a string
typedef std::function<std::string(Key)> KeyFormatter; using KeyFormatter = std::function<std::string(Key)>;
// Helper function for DefaultKeyFormatter // Helper function for DefaultKeyFormatter
GTSAM_EXPORT std::string _defaultKeyFormatter(Key key); GTSAM_EXPORT std::string _defaultKeyFormatter(Key key);
@ -83,28 +83,32 @@ class key_formatter {
}; };
/// Define collection type once and for all - also used in wrappers /// Define collection type once and for all - also used in wrappers
typedef FastVector<Key> KeyVector; using KeyVector = FastVector<Key>;
// TODO(frank): Nothing fast about these :-( // TODO(frank): Nothing fast about these :-(
typedef FastList<Key> KeyList; using KeyList = FastList<Key>;
typedef FastSet<Key> KeySet; using KeySet = FastSet<Key>;
typedef FastMap<Key, int> KeyGroupMap; using KeyGroupMap = FastMap<Key, int>;
/// Utility function to print one key with optional prefix /// Utility function to print one key with optional prefix
GTSAM_EXPORT void PrintKey(Key key, const std::string& s = "", GTSAM_EXPORT void PrintKey(
const KeyFormatter& keyFormatter = DefaultKeyFormatter); Key key, const std::string &s = "",
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
/// Utility function to print sets of keys with optional prefix /// Utility function to print sets of keys with optional prefix
GTSAM_EXPORT void PrintKeyList(const KeyList& keys, const std::string& s = "", GTSAM_EXPORT void PrintKeyList(
const KeyFormatter& keyFormatter = DefaultKeyFormatter); const KeyList &keys, const std::string &s = "",
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
/// Utility function to print sets of keys with optional prefix /// Utility function to print sets of keys with optional prefix
GTSAM_EXPORT void PrintKeyVector(const KeyVector& keys, const std::string& s = GTSAM_EXPORT void PrintKeyVector(
"", const KeyFormatter& keyFormatter = DefaultKeyFormatter); const KeyVector &keys, const std::string &s = "",
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
/// Utility function to print sets of keys with optional prefix /// Utility function to print sets of keys with optional prefix
GTSAM_EXPORT void PrintKeySet(const KeySet& keys, const std::string& s = "", GTSAM_EXPORT void PrintKeySet(
const KeyFormatter& keyFormatter = DefaultKeyFormatter); const KeySet &keys, const std::string &s = "",
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
// Define Key to be Testable by specializing gtsam::traits // Define Key to be Testable by specializing gtsam::traits
template<typename T> struct traits; template<typename T> struct traits;

View File

@ -25,8 +25,12 @@
#include <gtsam/3rdparty/CCOLAMD/Include/ccolamd.h> #include <gtsam/3rdparty/CCOLAMD/Include/ccolamd.h>
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION #ifdef GTSAM_SUPPORT_NESTED_DISSECTION
#ifdef GTSAM_USE_SYSTEM_METIS
#include <metis.h>
#else
#include <gtsam/3rdparty/metis/include/metis.h> #include <gtsam/3rdparty/metis/include/metis.h>
#endif #endif
#endif
using namespace std; using namespace std;

View File

@ -510,4 +510,33 @@ namespace gtsam {
return optimize(function); return optimize(function);
} }
/* ************************************************************************* */
void GaussianFactorGraph::printErrors(
const VectorValues& values, const std::string& str,
const KeyFormatter& keyFormatter,
const std::function<bool(const Factor* /*factor*/,
double /*whitenedError*/, size_t /*index*/)>&
printCondition) const {
cout << str << "size: " << size() << endl << endl;
for (size_t i = 0; i < (*this).size(); i++) {
const sharedFactor& factor = (*this)[i];
const double errorValue =
(factor != nullptr ? (*this)[i]->error(values) : .0);
if (!printCondition(factor.get(), errorValue, i))
continue; // User-provided filter did not pass
stringstream ss;
ss << "Factor " << i << ": ";
if (factor == nullptr) {
cout << "nullptr"
<< "\n";
} else {
factor->print(ss.str(), keyFormatter);
cout << "error = " << errorValue << "\n";
}
cout << endl; // only one "endl" at end might be faster, \n for each
// factor
}
}
} // namespace gtsam } // namespace gtsam

View File

@ -375,6 +375,14 @@ namespace gtsam {
/** In-place version e <- A*x that takes an iterator. */ /** In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const; void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;
void printErrors(
const VectorValues& x,
const std::string& str = "GaussianFactorGraph: ",
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
const std::function<bool(const Factor* /*factor*/,
double /*whitenedError*/, size_t /*index*/)>&
printCondition =
[](const Factor*, double, size_t) { return true; }) const;
/// @} /// @}
private: private:

View File

@ -16,9 +16,9 @@ virtual class Base {
}; };
virtual class Gaussian : gtsam::noiseModel::Base { virtual class Gaussian : gtsam::noiseModel::Base {
static gtsam::noiseModel::Gaussian* Information(Matrix R); static gtsam::noiseModel::Gaussian* Information(Matrix R, bool smart = true);
static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R); static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R, bool smart = true);
static gtsam::noiseModel::Gaussian* Covariance(Matrix R); static gtsam::noiseModel::Gaussian* Covariance(Matrix R, bool smart = true);
bool equals(gtsam::noiseModel::Base& expected, double tol); bool equals(gtsam::noiseModel::Base& expected, double tol);
@ -37,9 +37,9 @@ virtual class Gaussian : gtsam::noiseModel::Base {
}; };
virtual class Diagonal : gtsam::noiseModel::Gaussian { virtual class Diagonal : gtsam::noiseModel::Gaussian {
static gtsam::noiseModel::Diagonal* Sigmas(Vector sigmas); static gtsam::noiseModel::Diagonal* Sigmas(Vector sigmas, bool smart = true);
static gtsam::noiseModel::Diagonal* Variances(Vector variances); static gtsam::noiseModel::Diagonal* Variances(Vector variances, bool smart = true);
static gtsam::noiseModel::Diagonal* Precisions(Vector precisions); static gtsam::noiseModel::Diagonal* Precisions(Vector precisions, bool smart = true);
Matrix R() const; Matrix R() const;
// access to noise model // access to noise model
@ -69,9 +69,9 @@ virtual class Constrained : gtsam::noiseModel::Diagonal {
}; };
virtual class Isotropic : gtsam::noiseModel::Diagonal { virtual class Isotropic : gtsam::noiseModel::Diagonal {
static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma); static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma, bool smart = true);
static gtsam::noiseModel::Isotropic* Variance(size_t dim, double varianace); static gtsam::noiseModel::Isotropic* Variance(size_t dim, double varianace, bool smart = true);
static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision); static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision, bool smart = true);
// access to noise model // access to noise model
double sigma() const; double sigma() const;
@ -221,6 +221,7 @@ class VectorValues {
//Constructors //Constructors
VectorValues(); VectorValues();
VectorValues(const gtsam::VectorValues& other); VectorValues(const gtsam::VectorValues& other);
VectorValues(const gtsam::VectorValues& first, const gtsam::VectorValues& second);
//Named Constructors //Named Constructors
static gtsam::VectorValues Zero(const gtsam::VectorValues& model); static gtsam::VectorValues Zero(const gtsam::VectorValues& model);
@ -289,6 +290,13 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3, JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
Vector b, const gtsam::noiseModel::Diagonal* model); Vector b, const gtsam::noiseModel::Diagonal* model);
JacobianFactor(const gtsam::GaussianFactorGraph& graph); JacobianFactor(const gtsam::GaussianFactorGraph& graph);
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
const gtsam::VariableSlots& p_variableSlots);
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
const gtsam::Ordering& ordering);
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
const gtsam::Ordering& ordering,
const gtsam::VariableSlots& p_variableSlots);
//Testable //Testable
void print(string s = "", const gtsam::KeyFormatter& keyFormatter = void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
@ -393,6 +401,7 @@ class GaussianFactorGraph {
// error and probability // error and probability
double error(const gtsam::VectorValues& c) const; double error(const gtsam::VectorValues& c) const;
double probPrime(const gtsam::VectorValues& c) const; double probPrime(const gtsam::VectorValues& c) const;
void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;
gtsam::GaussianFactorGraph clone() const; gtsam::GaussianFactorGraph clone() const;
gtsam::GaussianFactorGraph negate() const; gtsam::GaussianFactorGraph negate() const;

View File

@ -88,6 +88,8 @@ virtual class PreintegratedRotationParams {
virtual class PreintegrationParams : gtsam::PreintegratedRotationParams { virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
PreintegrationParams(Vector n_gravity); PreintegrationParams(Vector n_gravity);
gtsam::Vector n_gravity;
static gtsam::PreintegrationParams* MakeSharedD(double g); static gtsam::PreintegrationParams* MakeSharedD(double g);
static gtsam::PreintegrationParams* MakeSharedU(double g); static gtsam::PreintegrationParams* MakeSharedU(double g);
static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81 static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81

View File

@ -819,7 +819,6 @@ struct ImuFactorMergeTest {
loop_(Vector3(0, -kAngularVelocity, 0), Vector3(kVelocity, 0, 0)) { loop_(Vector3(0, -kAngularVelocity, 0), Vector3(kVelocity, 0, 0)) {
// arbitrary noise values // arbitrary noise values
p_->gyroscopeCovariance = I_3x3 * 0.01; p_->gyroscopeCovariance = I_3x3 * 0.01;
p_->accelerometerCovariance = I_3x3 * 0.02;
p_->accelerometerCovariance = I_3x3 * 0.03; p_->accelerometerCovariance = I_3x3 * 0.03;
} }

View File

@ -246,6 +246,18 @@ struct apply_compose {
return x.compose(y, H1, H2); return x.compose(y, H1, H2);
} }
}; };
template <>
struct apply_compose<double> {
double operator()(const double& x, const double& y,
OptionalJacobian<1, 1> H1 = boost::none,
OptionalJacobian<1, 1> H2 = boost::none) const {
if (H1) H1->setConstant(y);
if (H2) H2->setConstant(x);
return x * y;
}
};
} }
// Global methods: // Global methods:

View File

@ -110,7 +110,7 @@ class GTSAM_EXPORT FunctorizedFactor : public NoiseModelFactor1<T> {
bool equals(const NonlinearFactor &other, double tol = 1e-9) const override { bool equals(const NonlinearFactor &other, double tol = 1e-9) const override {
const FunctorizedFactor<R, T> *e = const FunctorizedFactor<R, T> *e =
dynamic_cast<const FunctorizedFactor<R, T> *>(&other); dynamic_cast<const FunctorizedFactor<R, T> *>(&other);
return e && Base::equals(other, tol) && return e != nullptr && Base::equals(other, tol) &&
traits<R>::Equals(this->measured_, e->measured_, tol); traits<R>::Equals(this->measured_, e->measured_, tol);
} }
/// @} /// @}

View File

@ -75,12 +75,15 @@ size_t Z(size_t j);
} // namespace symbol_shorthand } // namespace symbol_shorthand
// Default keyformatter // Default keyformatter
void PrintKeyList(const gtsam::KeyList& keys); void PrintKeyList(
void PrintKeyList(const gtsam::KeyList& keys, string s); const gtsam::KeyList& keys, const string& s = "",
void PrintKeyVector(const gtsam::KeyVector& keys); const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
void PrintKeyVector(const gtsam::KeyVector& keys, string s); void PrintKeyVector(
void PrintKeySet(const gtsam::KeySet& keys); const gtsam::KeyVector& keys, const string& s = "",
void PrintKeySet(const gtsam::KeySet& keys, string s); const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
void PrintKeySet(
const gtsam::KeySet& keys, const string& s = "",
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
#include <gtsam/inference/LabeledSymbol.h> #include <gtsam/inference/LabeledSymbol.h>
class LabeledSymbol { class LabeledSymbol {
@ -527,7 +530,14 @@ template<PARAMS>
virtual class GncParams { virtual class GncParams {
GncParams(const PARAMS& baseOptimizerParams); GncParams(const PARAMS& baseOptimizerParams);
GncParams(); GncParams();
void setVerbosityGNC(const This::Verbosity value);
void print(const string& str) const; void print(const string& str) const;
enum Verbosity {
SILENT,
SUMMARY,
VALUES
};
}; };
typedef gtsam::GncParams<gtsam::GaussNewtonParams> GncGaussNewtonParams; typedef gtsam::GncParams<gtsam::GaussNewtonParams> GncGaussNewtonParams;

View File

@ -293,6 +293,19 @@ TEST(Expression, compose3) {
EXPECT(expected == R3.keys()); EXPECT(expected == R3.keys());
} }
/* ************************************************************************* */
// Test compose with double type (should be multiplication).
TEST(Expression, compose4) {
// Create expression
gtsam::Key key = 1;
Double_ R1(key), R2(key);
Double_ R3 = R1 * R2;
// Check keys
set<Key> expected = list_of(1);
EXPECT(expected == R3.keys());
}
/* ************************************************************************* */ /* ************************************************************************* */
// Test with ternary function. // Test with ternary function.
Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1, Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1,

View File

@ -20,8 +20,12 @@
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h> #include <gtsam/base/TestableAssertions.h>
#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h> #include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/factorTesting.h> #include <gtsam/nonlinear/factorTesting.h>
using namespace std; using namespace std;
@ -60,7 +64,7 @@ class ProjectionFunctor {
if (H1) { if (H1) {
H1->resize(x.size(), A.size()); H1->resize(x.size(), A.size());
*H1 << I_3x3, I_3x3, I_3x3; *H1 << I_3x3, I_3x3, I_3x3;
} }
if (H2) *H2 = A; if (H2) *H2 = A;
return A * x; return A * x;
} }
@ -255,18 +259,148 @@ TEST(FunctorizedFactor, Lambda2) {
if (H1) { if (H1) {
H1->resize(x.size(), A.size()); H1->resize(x.size(), A.size());
*H1 << I_3x3, I_3x3, I_3x3; *H1 << I_3x3, I_3x3, I_3x3;
} }
if (H2) *H2 = A; if (H2) *H2 = A;
return A * x; return A * x;
}; };
// FunctorizedFactor<Matrix> factor(key, measurement, model, lambda); // FunctorizedFactor<Matrix> factor(key, measurement, model, lambda);
auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement, model2, lambda); auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement,
model2, lambda);
Vector error = factor.evaluateError(A, x); Vector error = factor.evaluateError(A, x);
EXPECT(assert_equal(Vector::Zero(3), error, 1e-9)); EXPECT(assert_equal(Vector::Zero(3), error, 1e-9));
} }
const size_t N = 2;
//******************************************************************************
TEST(FunctorizedFactor, Print2) {
const size_t M = 1;
Vector measured = Vector::Ones(M) * 42;
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);
string expected =
" keys = { X0 }\n"
" noise model: unit (1) \n"
"FunctorizedFactor(X0)\n"
" measurement: [\n"
" 42\n"
"]\n"
" noise model sigmas: 1\n";
EXPECT(assert_print_equal(expected, priorFactor));
}
//******************************************************************************
TEST(FunctorizedFactor, VectorEvaluationFactor) {
const size_t M = 4;
Vector measured = Vector::Zero(M);
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);
NonlinearFactorGraph graph;
graph.add(priorFactor);
ParameterMatrix<M> stateMatrix(N);
Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.verbosity = NonlinearOptimizerParams::SILENT;
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
//******************************************************************************
TEST(FunctorizedFactor, VectorComponentFactor) {
const int P = 4;
const size_t i = 2;
const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
auto model = noiseModel::Isotropic::Sigma(1, 1.0);
VectorComponentFactor<Chebyshev2, P> controlPrior(key, measured, model, N, i,
t, a, b);
NonlinearFactorGraph graph;
graph.add(controlPrior);
ParameterMatrix<P> stateMatrix(N);
Values initial;
initial.insert<ParameterMatrix<P>>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.verbosity = NonlinearOptimizerParams::SILENT;
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
//******************************************************************************
TEST(FunctorizedFactor, VecDerivativePrior) {
const size_t M = 4;
Vector measured = Vector::Zero(M);
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);
NonlinearFactorGraph graph;
graph.add(vecDPrior);
ParameterMatrix<M> stateMatrix(N);
Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.verbosity = NonlinearOptimizerParams::SILENT;
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
//******************************************************************************
TEST(FunctorizedFactor, ComponentDerivativeFactor) {
const size_t M = 4;
double measured = 0;
auto model = noiseModel::Isotropic::Sigma(1, 1.0);
ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
N, 0, 0);
NonlinearFactorGraph graph;
graph.add(controlDPrior);
Values initial;
ParameterMatrix<M> stateMatrix(N);
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
LevenbergMarquardtParams parameters;
parameters.verbosity = NonlinearOptimizerParams::SILENT;
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
/* ************************************************************************* */ /* ************************************************************************* */
int main() { int main() {
TestResult tr; TestResult tr;

View File

@ -103,7 +103,7 @@ namespace gtsam {
boost::none, boost::optional<Matrix&> H2 = boost::none) const override { boost::none, boost::optional<Matrix&> H2 = boost::none) const override {
T hx = traits<T>::Between(p1, p2, H1, H2); // h(x) T hx = traits<T>::Between(p1, p2, H1, H2); // h(x)
// manifold equivalent of h(x)-z -> log(z,h(x)) // manifold equivalent of h(x)-z -> log(z,h(x))
#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR #ifdef GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
typename traits<T>::ChartJacobian::Jacobian Hlocal; typename traits<T>::ChartJacobian::Jacobian Hlocal;
Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0); Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0);
if (H1) *H1 = Hlocal * (*H1); if (H1) *H1 = Hlocal * (*H1);

View File

@ -306,7 +306,7 @@ class SmartProjectionFactorP : public SmartProjectionFactor<CAMERA> {
// Build augmented Hessian (with last row/column being the information vector) // Build augmented Hessian (with last row/column being the information vector)
// Note: we need to get the augumented hessian wrt the unique keys in key_ // Note: we need to get the augumented hessian wrt the unique keys in key_
SymmetricBlockMatrix augmentedHessianUniqueKeys = SymmetricBlockMatrix augmentedHessianUniqueKeys =
Base::Cameras::SchurComplementAndRearrangeBlocks_3_6_6( Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 6, 6>(
Fs, E, P, b, nonUniqueKeys_, this->keys_); Fs, E, P, b, nonUniqueKeys_, this->keys_);
return boost::make_shared < RegularHessianFactor<DimPose> return boost::make_shared < RegularHessianFactor<DimPose>

View File

@ -138,4 +138,21 @@ Point2_ uncalibrate(const Expression<CALIBRATION>& K, const Point2_& xy_hat) {
return Point2_(K, &CALIBRATION::uncalibrate, xy_hat); return Point2_(K, &CALIBRATION::uncalibrate, xy_hat);
} }
/// logmap
// TODO(dellaert): Should work but fails because of a type deduction conflict.
// template <typename T>
// gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
// const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
// return gtsam::Expression<typename gtsam::traits<T>::TangentVector>(
// x1, &T::logmap, x2);
// }
template <typename T>
gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
return Expression<typename gtsam::traits<T>::TangentVector>(
gtsam::traits<T>::Logmap, between(x1, x2));
}
} // \namespace gtsam } // \namespace gtsam

View File

@ -36,7 +36,7 @@ static const Matrix I = I_1x1;
static const Matrix I3 = I_3x3; static const Matrix I3 = I_3x3;
static const noiseModel::Diagonal::shared_ptr priorOrientationNoise = static const noiseModel::Diagonal::shared_ptr priorOrientationNoise =
noiseModel::Diagonal::Sigmas((Vector(1) << 0).finished()); noiseModel::Diagonal::Sigmas(Vector1(0));
static const noiseModel::Diagonal::shared_ptr priorPose2Noise = static const noiseModel::Diagonal::shared_ptr priorPose2Noise =
noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8)); noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8));

View File

@ -334,5 +334,11 @@ virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
Vector evaluateError(const T& R1, const T& R2); Vector evaluateError(const T& R1, const T& R2);
}; };
#include <gtsam/slam/lago.h>
namespace lago {
gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, bool useOdometricPath = true);
gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& initialGuess);
}
} // namespace gtsam } // namespace gtsam

View File

@ -58,6 +58,13 @@ TEST(SlamExpressions, unrotate) {
const Point3_ q_ = unrotate(R_, p_); const Point3_ q_ = unrotate(R_, p_);
} }
/* ************************************************************************* */
TEST(SlamExpressions, logmap) {
Pose3_ T1_(0);
Pose3_ T2_(1);
const Vector6_ l = logmap(T1_, T2_);
}
/* ************************************************************************* */ /* ************************************************************************* */
int main() { int main() {
TestResult tr; TestResult tr;

View File

@ -100,12 +100,12 @@ endif()
install( install(
TARGETS gtsam_unstable TARGETS gtsam_unstable
EXPORT GTSAM-exports EXPORT GTSAM_UNSTABLE-exports
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable) list(APPEND GTSAM_UNSTABLE_EXPORTED_TARGETS gtsam_unstable)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) set(GTSAM_UNSTABLE_EXPORTED_TARGETS "${GTSAM_UNSTABLE_EXPORTED_TARGETS}" PARENT_SCOPE)
# Build examples # Build examples
add_subdirectory(examples) add_subdirectory(examples)

View File

@ -20,6 +20,8 @@
#include "FindSeparator.h" #include "FindSeparator.h"
#ifndef GTSAM_USE_SYSTEM_METIS
extern "C" { extern "C" {
#include <metis.h> #include <metis.h>
#include "metislib.h" #include "metislib.h"
@ -564,3 +566,5 @@ namespace gtsam { namespace partition {
} }
}} //namespace }} //namespace
#endif

View File

@ -20,6 +20,8 @@ using namespace std;
using namespace gtsam; using namespace gtsam;
using namespace gtsam::partition; using namespace gtsam::partition;
#ifndef GTSAM_USE_SYSTEM_METIS
/* ************************************************************************* */ /* ************************************************************************* */
// x0 - x1 - x2 // x0 - x1 - x2
// l3 l4 // l3 l4
@ -227,6 +229,8 @@ TEST ( Partition, findSeparator3_with_reduced_camera )
LONGS_EQUAL(2, partitionTable[28]); LONGS_EQUAL(2, partitionTable[28]);
} }
#endif
/* ************************************************************************* */ /* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);} int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -23,7 +23,6 @@ Vector ProjectionFactorRollingShutter::evaluateError(
const Pose3& pose_a, const Pose3& pose_b, const Point3& point, const Pose3& pose_a, const Pose3& pose_b, const Point3& point,
boost::optional<Matrix&> H1, boost::optional<Matrix&> H2, boost::optional<Matrix&> H1, boost::optional<Matrix&> H2,
boost::optional<Matrix&> H3) const { boost::optional<Matrix&> H3) const {
try { try {
Pose3 pose = interpolate<Pose3>(pose_a, pose_b, alpha_, H1, H2); Pose3 pose = interpolate<Pose3>(pose_a, pose_b, alpha_, H1, H2);
gtsam::Matrix Hprj; gtsam::Matrix Hprj;
@ -32,12 +31,10 @@ Vector ProjectionFactorRollingShutter::evaluateError(
gtsam::Matrix HbodySensor; gtsam::Matrix HbodySensor;
PinholeCamera<Cal3_S2> camera( PinholeCamera<Cal3_S2> camera(
pose.compose(*body_P_sensor_, HbodySensor), *K_); pose.compose(*body_P_sensor_, HbodySensor), *K_);
Point2 reprojectionError( Point2 reprojectionError(camera.project(point, Hprj, H3, boost::none) -
camera.project(point, Hprj, H3, boost::none) - measured_); measured_);
if (H1) if (H1) *H1 = Hprj * HbodySensor * (*H1);
*H1 = Hprj * HbodySensor * (*H1); if (H2) *H2 = Hprj * HbodySensor * (*H2);
if (H2)
*H2 = Hprj * HbodySensor * (*H2);
return reprojectionError; return reprojectionError;
} else { } else {
PinholeCamera<Cal3_S2> camera(pose.compose(*body_P_sensor_), *K_); PinholeCamera<Cal3_S2> camera(pose.compose(*body_P_sensor_), *K_);
@ -45,29 +42,23 @@ Vector ProjectionFactorRollingShutter::evaluateError(
} }
} else { } else {
PinholeCamera<Cal3_S2> camera(pose, *K_); PinholeCamera<Cal3_S2> camera(pose, *K_);
Point2 reprojectionError( Point2 reprojectionError(camera.project(point, Hprj, H3, boost::none) -
camera.project(point, Hprj, H3, boost::none) - measured_); measured_);
if (H1) if (H1) *H1 = Hprj * (*H1);
*H1 = Hprj * (*H1); if (H2) *H2 = Hprj * (*H2);
if (H2)
*H2 = Hprj * (*H2);
return reprojectionError; return reprojectionError;
} }
} catch (CheiralityException& e) { } catch (CheiralityException& e) {
if (H1) if (H1) *H1 = Matrix::Zero(2, 6);
*H1 = Matrix::Zero(2, 6); if (H2) *H2 = Matrix::Zero(2, 6);
if (H2) if (H3) *H3 = Matrix::Zero(2, 3);
*H2 = Matrix::Zero(2, 6);
if (H3)
*H3 = Matrix::Zero(2, 3);
if (verboseCheirality_) if (verboseCheirality_)
std::cout << e.what() << ": Landmark " std::cout << e.what() << ": Landmark "
<< DefaultKeyFormatter(this->key2()) << " moved behind camera " << DefaultKeyFormatter(this->key2()) << " moved behind camera "
<< DefaultKeyFormatter(this->key1()) << std::endl; << DefaultKeyFormatter(this->key1()) << std::endl;
if (throwCheirality_) if (throwCheirality_) throw CheiralityException(this->key2());
throw CheiralityException(this->key2());
} }
return Vector2::Constant(2.0 * K_->fx()); return Vector2::Constant(2.0 * K_->fx());
} }
} //namespace gtsam } // namespace gtsam

View File

@ -17,41 +17,47 @@
#pragma once #pragma once
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/CalibratedCamera.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/CalibratedCamera.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <boost/optional.hpp> #include <boost/optional.hpp>
namespace gtsam { namespace gtsam {
/** /**
* Non-linear factor for 2D projection measurement obtained using a rolling shutter camera. The calibration is known here. * Non-linear factor for 2D projection measurement obtained using a rolling
* This version takes rolling shutter information into account as follows: consider two consecutive poses A and B, * shutter camera. The calibration is known here. This version takes rolling
* and a Point2 measurement taken starting at time A using a rolling shutter camera. * shutter information into account as follows: consider two consecutive poses A
* Pose A has timestamp t_A, and Pose B has timestamp t_B. The Point2 measurement has timestamp t_p (with t_A <= t_p <= t_B) * and B, and a Point2 measurement taken starting at time A using a rolling
* corresponding to the time of exposure of the row of the image the pixel belongs to. * shutter camera. Pose A has timestamp t_A, and Pose B has timestamp t_B. The
* Let us define the alpha = (t_p - t_A) / (t_B - t_A), we will use the pose interpolated between A and B by * Point2 measurement has timestamp t_p (with t_A <= t_p <= t_B) corresponding
* the alpha to project the corresponding landmark to Point2. * to the time of exposure of the row of the image the pixel belongs to. Let us
* define the alpha = (t_p - t_A) / (t_B - t_A), we will use the pose
* interpolated between A and B by the alpha to project the corresponding
* landmark to Point2.
* @addtogroup SLAM * @addtogroup SLAM
*/ */
class ProjectionFactorRollingShutter : public NoiseModelFactor3<Pose3, Pose3, class ProjectionFactorRollingShutter
Point3> { : public NoiseModelFactor3<Pose3, Pose3, Point3> {
protected: protected:
// Keep a copy of measurement and calibration for I/O // Keep a copy of measurement and calibration for I/O
Point2 measured_; ///< 2D measurement Point2 measured_; ///< 2D measurement
double alpha_; ///< interpolation parameter in [0,1] corresponding to the point2 measurement double alpha_; ///< interpolation parameter in [0,1] corresponding to the
///< point2 measurement
boost::shared_ptr<Cal3_S2> K_; ///< shared pointer to calibration object boost::shared_ptr<Cal3_S2> K_; ///< shared pointer to calibration object
boost::optional<Pose3> body_P_sensor_; ///< The pose of the sensor in the body frame boost::optional<Pose3>
body_P_sensor_; ///< The pose of the sensor in the body frame
// verbosity handling for Cheirality Exceptions // verbosity handling for Cheirality Exceptions
bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default: false) bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default:
bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions (default: false) ///< false)
bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions
///< (default: false)
public: public:
/// shorthand for base class type /// shorthand for base class type
typedef NoiseModelFactor3<Pose3, Pose3, Point3> Base; typedef NoiseModelFactor3<Pose3, Pose3, Point3> Base;
@ -66,72 +72,72 @@ class ProjectionFactorRollingShutter : public NoiseModelFactor3<Pose3, Pose3,
: measured_(0, 0), : measured_(0, 0),
alpha_(0), alpha_(0),
throwCheirality_(false), throwCheirality_(false),
verboseCheirality_(false) { verboseCheirality_(false) {}
}
/** /**
* Constructor * Constructor
* @param measured is the 2-dimensional pixel location of point in the image (the measurement) * @param measured is the 2-dimensional pixel location of point in the image
* (the measurement)
* @param alpha in [0,1] is the rolling shutter parameter for the measurement * @param alpha in [0,1] is the rolling shutter parameter for the measurement
* @param model is the noise model * @param model is the noise model
* @param poseKey_a is the key of the first camera * @param poseKey_a is the key of the first camera
* @param poseKey_b is the key of the second camera * @param poseKey_b is the key of the second camera
* @param pointKey is the key of the landmark * @param pointKey is the key of the landmark
* @param K shared pointer to the constant calibration * @param K shared pointer to the constant calibration
* @param body_P_sensor is the transform from body to sensor frame (default identity) * @param body_P_sensor is the transform from body to sensor frame (default
* identity)
*/ */
ProjectionFactorRollingShutter(const Point2& measured, double alpha, ProjectionFactorRollingShutter(
const SharedNoiseModel& model, Key poseKey_a, const Point2& measured, double alpha, const SharedNoiseModel& model,
Key poseKey_b, Key pointKey, Key poseKey_a, Key poseKey_b, Key pointKey,
const boost::shared_ptr<Cal3_S2>& K, const boost::shared_ptr<Cal3_S2>& K,
boost::optional<Pose3> body_P_sensor = boost::optional<Pose3> body_P_sensor = boost::none)
boost::none)
: Base(model, poseKey_a, poseKey_b, pointKey), : Base(model, poseKey_a, poseKey_b, pointKey),
measured_(measured), measured_(measured),
alpha_(alpha), alpha_(alpha),
K_(K), K_(K),
body_P_sensor_(body_P_sensor), body_P_sensor_(body_P_sensor),
throwCheirality_(false), throwCheirality_(false),
verboseCheirality_(false) { verboseCheirality_(false) {}
}
/** /**
* Constructor with exception-handling flags * Constructor with exception-handling flags
* @param measured is the 2-dimensional pixel location of point in the image (the measurement) * @param measured is the 2-dimensional pixel location of point in the image
* (the measurement)
* @param alpha in [0,1] is the rolling shutter parameter for the measurement * @param alpha in [0,1] is the rolling shutter parameter for the measurement
* @param model is the noise model * @param model is the noise model
* @param poseKey_a is the key of the first camera * @param poseKey_a is the key of the first camera
* @param poseKey_b is the key of the second camera * @param poseKey_b is the key of the second camera
* @param pointKey is the key of the landmark * @param pointKey is the key of the landmark
* @param K shared pointer to the constant calibration * @param K shared pointer to the constant calibration
* @param throwCheirality determines whether Cheirality exceptions are rethrown * @param throwCheirality determines whether Cheirality exceptions are
* @param verboseCheirality determines whether exceptions are printed for Cheirality * rethrown
* @param body_P_sensor is the transform from body to sensor frame (default identity) * @param verboseCheirality determines whether exceptions are printed for
* Cheirality
* @param body_P_sensor is the transform from body to sensor frame (default
* identity)
*/ */
ProjectionFactorRollingShutter(const Point2& measured, double alpha, ProjectionFactorRollingShutter(
const SharedNoiseModel& model, Key poseKey_a, const Point2& measured, double alpha, const SharedNoiseModel& model,
Key poseKey_b, Key pointKey, Key poseKey_a, Key poseKey_b, Key pointKey,
const boost::shared_ptr<Cal3_S2>& K, const boost::shared_ptr<Cal3_S2>& K, bool throwCheirality,
bool throwCheirality, bool verboseCheirality, bool verboseCheirality,
boost::optional<Pose3> body_P_sensor = boost::optional<Pose3> body_P_sensor = boost::none)
boost::none)
: Base(model, poseKey_a, poseKey_b, pointKey), : Base(model, poseKey_a, poseKey_b, pointKey),
measured_(measured), measured_(measured),
alpha_(alpha), alpha_(alpha),
K_(K), K_(K),
body_P_sensor_(body_P_sensor), body_P_sensor_(body_P_sensor),
throwCheirality_(throwCheirality), throwCheirality_(throwCheirality),
verboseCheirality_(verboseCheirality) { verboseCheirality_(verboseCheirality) {}
}
/** Virtual destructor */ /** Virtual destructor */
virtual ~ProjectionFactorRollingShutter() { virtual ~ProjectionFactorRollingShutter() {}
}
/// @return a deep copy of this factor /// @return a deep copy of this factor
virtual gtsam::NonlinearFactor::shared_ptr clone() const { gtsam::NonlinearFactor::shared_ptr clone() const override {
return boost::static_pointer_cast < gtsam::NonlinearFactor return boost::static_pointer_cast<gtsam::NonlinearFactor>(
> (gtsam::NonlinearFactor::shared_ptr(new This(*this))); gtsam::NonlinearFactor::shared_ptr(new This(*this)));
} }
/** /**
@ -139,8 +145,9 @@ class ProjectionFactorRollingShutter : public NoiseModelFactor3<Pose3, Pose3,
* @param s optional string naming the factor * @param s optional string naming the factor
* @param keyFormatter optional formatter useful for printing Symbols * @param keyFormatter optional formatter useful for printing Symbols
*/ */
void print(const std::string& s = "", const KeyFormatter& keyFormatter = void print(
DefaultKeyFormatter) const { const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << s << "ProjectionFactorRollingShutter, z = "; std::cout << s << "ProjectionFactorRollingShutter, z = ";
traits<Point2>::Print(measured_); traits<Point2>::Print(measured_);
std::cout << " rolling shutter interpolation param = " << alpha_; std::cout << " rolling shutter interpolation param = " << alpha_;
@ -150,71 +157,61 @@ class ProjectionFactorRollingShutter : public NoiseModelFactor3<Pose3, Pose3,
} }
/// equals /// equals
virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
const This *e = dynamic_cast<const This*>(&p); const This* e = dynamic_cast<const This*>(&p);
return e && Base::equals(p, tol) && (alpha_ == e->alpha()) return e && Base::equals(p, tol) && (alpha_ == e->alpha()) &&
&& traits<Point2>::Equals(this->measured_, e->measured_, tol) traits<Point2>::Equals(this->measured_, e->measured_, tol) &&
&& this->K_->equals(*e->K_, tol) this->K_->equals(*e->K_, tol) &&
&& (this->throwCheirality_ == e->throwCheirality_) (this->throwCheirality_ == e->throwCheirality_) &&
&& (this->verboseCheirality_ == e->verboseCheirality_) (this->verboseCheirality_ == e->verboseCheirality_) &&
&& ((!body_P_sensor_ && !e->body_P_sensor_) ((!body_P_sensor_ && !e->body_P_sensor_) ||
|| (body_P_sensor_ && e->body_P_sensor_ (body_P_sensor_ && e->body_P_sensor_ &&
&& body_P_sensor_->equals(*e->body_P_sensor_))); body_P_sensor_->equals(*e->body_P_sensor_)));
} }
/// Evaluate error h(x)-z and optionally derivatives /// Evaluate error h(x)-z and optionally derivatives
Vector evaluateError(const Pose3& pose_a, const Pose3& pose_b, Vector evaluateError(
const Point3& point, boost::optional<Matrix&> H1 = const Pose3& pose_a, const Pose3& pose_b, const Point3& point,
boost::none, boost::optional<Matrix&> H1 = boost::none,
boost::optional<Matrix&> H2 = boost::none, boost::optional<Matrix&> H2 = boost::none,
boost::optional<Matrix&> H3 = boost::none) const; boost::optional<Matrix&> H3 = boost::none) const override;
/** return the measurement */ /** return the measurement */
const Point2& measured() const { const Point2& measured() const { return measured_; }
return measured_;
}
/** return the calibration object */ /** return the calibration object */
inline const boost::shared_ptr<Cal3_S2> calibration() const { inline const boost::shared_ptr<Cal3_S2> calibration() const { return K_; }
return K_;
}
/** returns the rolling shutter interp param*/ /** returns the rolling shutter interp param*/
inline double alpha() const { inline double alpha() const { return alpha_; }
return alpha_;
}
/** return verbosity */ /** return verbosity */
inline bool verboseCheirality() const { inline bool verboseCheirality() const { return verboseCheirality_; }
return verboseCheirality_;
}
/** return flag for throwing cheirality exceptions */ /** return flag for throwing cheirality exceptions */
inline bool throwCheirality() const { inline bool throwCheirality() const { return throwCheirality_; }
return throwCheirality_;
}
private: private:
/// Serialization function /// Serialization function
friend class boost::serialization::access; friend class boost::serialization::access;
template<class ARCHIVE> template <class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) { void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(measured_); ar& BOOST_SERIALIZATION_NVP(measured_);
ar & BOOST_SERIALIZATION_NVP(alpha_); ar& BOOST_SERIALIZATION_NVP(alpha_);
ar & BOOST_SERIALIZATION_NVP(K_); ar& BOOST_SERIALIZATION_NVP(K_);
ar & BOOST_SERIALIZATION_NVP(body_P_sensor_); ar& BOOST_SERIALIZATION_NVP(body_P_sensor_);
ar & BOOST_SERIALIZATION_NVP(throwCheirality_); ar& BOOST_SERIALIZATION_NVP(throwCheirality_);
ar & BOOST_SERIALIZATION_NVP(verboseCheirality_); ar& BOOST_SERIALIZATION_NVP(verboseCheirality_);
} }
public: public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW
}; };
/// traits /// traits
template<> struct traits<ProjectionFactorRollingShutter> : public Testable< template <>
ProjectionFactorRollingShutter> { struct traits<ProjectionFactorRollingShutter>
}; : public Testable<ProjectionFactorRollingShutter> {};
} //namespace gtsam } // namespace gtsam

View File

@ -11,12 +11,14 @@
/** /**
* @file SmartProjectionPoseFactorRollingShutter.h * @file SmartProjectionPoseFactorRollingShutter.h
* @brief Smart projection factor on poses modeling rolling shutter effect with given readout time * @brief Smart projection factor on poses modeling rolling shutter effect with
* given readout time
* @author Luca Carlone * @author Luca Carlone
*/ */
#pragma once #pragma once
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/slam/SmartProjectionFactor.h> #include <gtsam/slam/SmartProjectionFactor.h>
namespace gtsam { namespace gtsam {
@ -32,9 +34,10 @@ namespace gtsam {
*/ */
/** /**
* This factor optimizes two consecutive poses of the body assuming a rolling shutter model of the camera with given readout time. * This factor optimizes two consecutive poses of the body assuming a rolling
* The factor requires that values contain (for each pixel observation) two consecutive camera poses * shutter model of the camera with given readout time. The factor requires that
* from which the pixel observation pose can be interpolated. * values contain (for each pixel observation) two consecutive camera poses from
* which the pixel observation pose can be interpolated.
* @addtogroup SLAM * @addtogroup SLAM
*/ */
template<class CAMERA> template<class CAMERA>
@ -53,12 +56,14 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
protected: protected:
/// shared pointer to calibration object (one for each observation) /// shared pointer to calibration object (one for each observation)
std::vector<boost::shared_ptr<CALIBRATION> > K_all_; std::vector<boost::shared_ptr<CALIBRATION>> K_all_;
/// The keys of the pose of the body (with respect to an external world frame): two consecutive poses for each observation /// The keys of the pose of the body (with respect to an external world
/// frame): two consecutive poses for each observation
std::vector<std::pair<Key, Key>> world_P_body_key_pairs_; std::vector<std::pair<Key, Key>> world_P_body_key_pairs_;
/// interpolation factor (one for each observation) to interpolate between pair of consecutive poses /// interpolation factor (one for each observation) to interpolate between
/// pair of consecutive poses
std::vector<double> alphas_; std::vector<double> alphas_;
/// Pose of the camera in the body frame /// Pose of the camera in the body frame
@ -95,18 +100,23 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
~SmartProjectionPoseFactorRollingShutter() override = default; ~SmartProjectionPoseFactorRollingShutter() override = default;
/** /**
* add a new measurement, with 2 pose keys, interpolation factor, camera (intrinsic and extrinsic) calibration, and observed pixel. * add a new measurement, with 2 pose keys, interpolation factor, camera
* @param measured 2-dimensional location of the projection of a * (intrinsic and extrinsic) calibration, and observed pixel.
* single landmark in a single view (the measurement), interpolated from the 2 poses * @param measured 2-dimensional location of the projection of a single
* @param world_P_body_key1 key corresponding to the first body poses (time <= time pixel is acquired) * landmark in a single view (the measurement), interpolated from the 2 poses
* @param world_P_body_key2 key corresponding to the second body poses (time >= time pixel is acquired) * @param world_P_body_key1 key corresponding to the first body poses (time <=
* @param alpha interpolation factor in [0,1], such that if alpha = 0 the interpolated pose is the same as world_P_body_key1 * time pixel is acquired)
* @param world_P_body_key2 key corresponding to the second body poses (time
* >= time pixel is acquired)
* @param alpha interpolation factor in [0,1], such that if alpha = 0 the
* interpolated pose is the same as world_P_body_key1
* @param K (fixed) camera intrinsic calibration * @param K (fixed) camera intrinsic calibration
* @param body_P_sensor (fixed) camera extrinsic calibration * @param body_P_sensor (fixed) camera extrinsic calibration
*/ */
void add(const MEASUREMENT& measured, const Key& world_P_body_key1, void add(const MEASUREMENT& measured, const Key& world_P_body_key1,
const Key& world_P_body_key2, const double& alpha, const Key& world_P_body_key2, const double& alpha,
const boost::shared_ptr<CALIBRATION>& K, const Pose3 body_P_sensor = Pose3::identity()) { const boost::shared_ptr<CALIBRATION>& K,
const Pose3& body_P_sensor = Pose3::identity()) {
// store measurements in base class // store measurements in base class
this->measured_.push_back(measured); this->measured_.push_back(measured);
@ -114,10 +124,13 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
world_P_body_key_pairs_.push_back( world_P_body_key_pairs_.push_back(
std::make_pair(world_P_body_key1, world_P_body_key2)); std::make_pair(world_P_body_key1, world_P_body_key2));
// also store keys in the keys_ vector: these keys are assumed to be unique, so we avoid duplicates here // also store keys in the keys_ vector: these keys are assumed to be
if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key1) == this->keys_.end()) // unique, so we avoid duplicates here
if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key1) ==
this->keys_.end())
this->keys_.push_back(world_P_body_key1); // add only unique keys this->keys_.push_back(world_P_body_key1); // add only unique keys
if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key2) == this->keys_.end()) if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key2) ==
this->keys_.end())
this->keys_.push_back(world_P_body_key2); // add only unique keys this->keys_.push_back(world_P_body_key2); // add only unique keys
// store interpolation factor // store interpolation factor
@ -131,12 +144,15 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
} }
/** /**
* Variant of the previous "add" function in which we include multiple measurements * Variant of the previous "add" function in which we include multiple
* measurements
* @param measurements vector of the 2m dimensional location of the projection * @param measurements vector of the 2m dimensional location of the projection
* of a single landmark in the m views (the measurements) * of a single landmark in the m views (the measurements)
* @param world_P_body_key_pairs vector where the i-th element contains a pair of keys corresponding * @param world_P_body_key_pairs vector where the i-th element contains a pair
* to the pair of poses from which the observation pose for the i0-th measurement can be interpolated * of keys corresponding to the pair of poses from which the observation pose
* @param alphas vector of interpolation params (in [0,1]), one for each measurement (in the same order) * for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order)
* @param Ks vector of (fixed) intrinsic calibration objects * @param Ks vector of (fixed) intrinsic calibration objects
* @param body_P_sensors vector of (fixed) extrinsic calibration objects * @param body_P_sensors vector of (fixed) extrinsic calibration objects
*/ */
@ -144,7 +160,7 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs, const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas, const std::vector<double>& alphas,
const std::vector<boost::shared_ptr<CALIBRATION>>& Ks, const std::vector<boost::shared_ptr<CALIBRATION>>& Ks,
const std::vector<Pose3> body_P_sensors) { const std::vector<Pose3>& body_P_sensors) {
assert(world_P_body_key_pairs.size() == measurements.size()); assert(world_P_body_key_pairs.size() == measurements.size());
assert(world_P_body_key_pairs.size() == alphas.size()); assert(world_P_body_key_pairs.size() == alphas.size());
assert(world_P_body_key_pairs.size() == Ks.size()); assert(world_P_body_key_pairs.size() == Ks.size());
@ -156,20 +172,24 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
} }
/** /**
* Variant of the previous "add" function in which we include multiple measurements * Variant of the previous "add" function in which we include multiple
* with the same (intrinsic and extrinsic) calibration * measurements with the same (intrinsic and extrinsic) calibration
* @param measurements vector of the 2m dimensional location of the projection * @param measurements vector of the 2m dimensional location of the projection
* of a single landmark in the m views (the measurements) * of a single landmark in the m views (the measurements)
* @param world_P_body_key_pairs vector where the i-th element contains a pair of keys corresponding * @param world_P_body_key_pairs vector where the i-th element contains a pair
* to the pair of poses from which the observation pose for the i0-th measurement can be interpolated * of keys corresponding to the pair of poses from which the observation pose
* @param alphas vector of interpolation params (in [0,1]), one for each measurement (in the same order) * for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order)
* @param K (fixed) camera intrinsic calibration (same for all measurements) * @param K (fixed) camera intrinsic calibration (same for all measurements)
* @param body_P_sensor (fixed) camera extrinsic calibration (same for all measurements) * @param body_P_sensor (fixed) camera extrinsic calibration (same for all
* measurements)
*/ */
void add(const MEASUREMENTS& measurements, void add(const MEASUREMENTS& measurements,
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs, const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas, const std::vector<double>& alphas,
const boost::shared_ptr<CALIBRATION>& K, const Pose3 body_P_sensor = Pose3::identity()) { const boost::shared_ptr<CALIBRATION>& K,
const Pose3& body_P_sensor = Pose3::identity()) {
assert(world_P_body_key_pairs.size() == measurements.size()); assert(world_P_body_key_pairs.size() == measurements.size());
assert(world_P_body_key_pairs.size() == alphas.size()); assert(world_P_body_key_pairs.size() == alphas.size());
for (size_t i = 0; i < measurements.size(); i++) { for (size_t i = 0; i < measurements.size(); i++) {
@ -179,39 +199,37 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
} }
/// return the calibration object /// return the calibration object
inline std::vector<boost::shared_ptr<CALIBRATION>> calibration() const { const std::vector<boost::shared_ptr<CALIBRATION>>& calibration() const {
return K_all_; return K_all_;
} }
/// return (for each observation) the keys of the pair of poses from which we interpolate /// return (for each observation) the keys of the pair of poses from which we
const std::vector<std::pair<Key, Key>> world_P_body_key_pairs() const { /// interpolate
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs() const {
return world_P_body_key_pairs_; return world_P_body_key_pairs_;
} }
/// return the interpolation factors alphas /// return the interpolation factors alphas
const std::vector<double> alphas() const { const std::vector<double>& alphas() const { return alphas_; }
return alphas_;
}
/// return the extrinsic camera calibration body_P_sensors /// return the extrinsic camera calibration body_P_sensors
const std::vector<Pose3> body_P_sensors() const { const std::vector<Pose3>& body_P_sensors() const { return body_P_sensors_; }
return body_P_sensors_;
}
/** /**
* print * print
* @param s optional string naming the factor * @param s optional string naming the factor
* @param keyFormatter optional formatter useful for printing Symbols * @param keyFormatter optional formatter useful for printing Symbols
*/ */
void print(const std::string& s = "", const KeyFormatter& keyFormatter = void print(
DefaultKeyFormatter) const override { const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n "; std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n ";
for (size_t i = 0; i < K_all_.size(); i++) { for (size_t i = 0; i < K_all_.size(); i++) {
std::cout << "-- Measurement nr " << i << std::endl; std::cout << "-- Measurement nr " << i << std::endl;
std::cout << " pose1 key: " std::cout << " pose1 key: "
<< keyFormatter(world_P_body_key_pairs_[i].first) << std::endl; << keyFormatter(world_P_body_key_pairs_[i].first) << std::endl;
std::cout << " pose2 key: " std::cout << " pose2 key: "
<< keyFormatter(world_P_body_key_pairs_[i].second) << std::endl; << keyFormatter(world_P_body_key_pairs_[i].second) << std::endl;
std::cout << " alpha: " << alphas_[i] << std::endl; std::cout << " alpha: " << alphas_[i] << std::endl;
body_P_sensors_[i].print("extrinsic calibration:\n"); body_P_sensors_[i].print("extrinsic calibration:\n");
K_all_[i]->print("intrinsic calibration = "); K_all_[i]->print("intrinsic calibration = ");
@ -222,32 +240,40 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
/// equals /// equals
bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
const SmartProjectionPoseFactorRollingShutter<CAMERA>* e = const SmartProjectionPoseFactorRollingShutter<CAMERA>* e =
dynamic_cast<const SmartProjectionPoseFactorRollingShutter<CAMERA>*>(&p); dynamic_cast<const SmartProjectionPoseFactorRollingShutter<CAMERA>*>(
&p);
double keyPairsEqual = true; double keyPairsEqual = true;
if(this->world_P_body_key_pairs_.size() == e->world_P_body_key_pairs().size()){ if (this->world_P_body_key_pairs_.size() ==
for(size_t k=0; k< this->world_P_body_key_pairs_.size(); k++){ e->world_P_body_key_pairs().size()) {
for (size_t k = 0; k < this->world_P_body_key_pairs_.size(); k++) {
const Key key1own = world_P_body_key_pairs_[k].first; const Key key1own = world_P_body_key_pairs_[k].first;
const Key key1e = e->world_P_body_key_pairs()[k].first; const Key key1e = e->world_P_body_key_pairs()[k].first;
const Key key2own = world_P_body_key_pairs_[k].second; const Key key2own = world_P_body_key_pairs_[k].second;
const Key key2e = e->world_P_body_key_pairs()[k].second; const Key key2e = e->world_P_body_key_pairs()[k].second;
if ( !(key1own == key1e) || !(key2own == key2e) ){ if (!(key1own == key1e) || !(key2own == key2e)) {
keyPairsEqual = false; break; keyPairsEqual = false;
break;
} }
} }
}else{ keyPairsEqual = false; } } else {
keyPairsEqual = false;
}
double extrinsicCalibrationEqual = true; double extrinsicCalibrationEqual = true;
if(this->body_P_sensors_.size() == e->body_P_sensors().size()){ if (this->body_P_sensors_.size() == e->body_P_sensors().size()) {
for(size_t i=0; i< this->body_P_sensors_.size(); i++){ for (size_t i = 0; i < this->body_P_sensors_.size(); i++) {
if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])){ if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])) {
extrinsicCalibrationEqual = false; break; extrinsicCalibrationEqual = false;
break;
} }
} }
}else{ extrinsicCalibrationEqual = false; } } else {
extrinsicCalibrationEqual = false;
}
return e && Base::equals(p, tol) && K_all_ == e->calibration() return e && Base::equals(p, tol) && K_all_ == e->calibration() &&
&& alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual; alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual;
} }
/** /**
@ -291,9 +317,10 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
* Compute jacobian F, E and error vector at a given linearization point * Compute jacobian F, E and error vector at a given linearization point
* @param values Values structure which must contain camera poses * @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor * corresponding to keys involved in this factor
* @return Return arguments are the camera jacobians Fs (including the jacobian with * @return Return arguments are the camera jacobians Fs (including the
* respect to both body poses we interpolate from), the point Jacobian E, * jacobian with respect to both body poses we interpolate from), the point
* and the error vector b. Note that the jacobians are computed for a given point. * Jacobian E, and the error vector b. Note that the jacobians are computed
* for a given point.
*/ */
void computeJacobiansWithTriangulatedPoint(FBlocks& Fs, Matrix& E, Vector& b, void computeJacobiansWithTriangulatedPoint(FBlocks& Fs, Matrix& E, Vector& b,
const Values& values) const { const Values& values) const {
@ -301,32 +328,37 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
throw("computeJacobiansWithTriangulatedPoint"); throw("computeJacobiansWithTriangulatedPoint");
} else { // valid result: compute jacobians } else { // valid result: compute jacobians
size_t numViews = this->measured_.size(); size_t numViews = this->measured_.size();
E = Matrix::Zero(2 * numViews, 3); // a Point2 for each view (point jacobian) E = Matrix::Zero(2 * numViews,
3); // a Point2 for each view (point jacobian)
b = Vector::Zero(2 * numViews); // a Point2 for each view b = Vector::Zero(2 * numViews); // a Point2 for each view
// intermediate Jacobians // intermediate Jacobians
Eigen::Matrix<double, ZDim, DimPose> dProject_dPoseCam; Eigen::Matrix<double, ZDim, DimPose> dProject_dPoseCam;
Eigen::Matrix<double, DimPose, DimPose> dInterpPose_dPoseBody1, Eigen::Matrix<double, DimPose, DimPose> dInterpPose_dPoseBody1,
dInterpPose_dPoseBody2, dPoseCam_dInterpPose; dInterpPose_dPoseBody2, dPoseCam_dInterpPose;
Eigen::Matrix<double, ZDim, 3> Ei; Eigen::Matrix<double, ZDim, 3> Ei;
for (size_t i = 0; i < numViews; i++) { // for each camera/measurement for (size_t i = 0; i < numViews; i++) { // for each camera/measurement
const Pose3& w_P_body1 = values.at<Pose3>(world_P_body_key_pairs_[i].first); auto w_P_body1 = values.at<Pose3>(world_P_body_key_pairs_[i].first);
const Pose3& w_P_body2 = values.at<Pose3>(world_P_body_key_pairs_[i].second); auto w_P_body2 = values.at<Pose3>(world_P_body_key_pairs_[i].second);
double interpolationFactor = alphas_[i]; double interpolationFactor = alphas_[i];
// get interpolated pose: // get interpolated pose:
const Pose3& w_P_body = interpolate<Pose3>(w_P_body1, w_P_body2,interpolationFactor, dInterpPose_dPoseBody1, dInterpPose_dPoseBody2); auto w_P_body =
const Pose3& body_P_cam = body_P_sensors_[i]; interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor,
const Pose3& w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose); dInterpPose_dPoseBody1, dInterpPose_dPoseBody2);
auto body_P_cam = body_P_sensors_[i];
auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose);
typename Base::Camera camera(w_P_cam, K_all_[i]); typename Base::Camera camera(w_P_cam, K_all_[i]);
// get jacobians and error vector for current measurement // get jacobians and error vector for current measurement
Point2 reprojectionError_i = camera.reprojectionError( Point2 reprojectionError_i = camera.reprojectionError(
*this->result_, this->measured_.at(i), dProject_dPoseCam, Ei); *this->result_, this->measured_.at(i), dProject_dPoseCam, Ei);
Eigen::Matrix<double, ZDim, DimBlock> J; // 2 x 12 Eigen::Matrix<double, ZDim, DimBlock> J; // 2 x 12
J.block(0, 0, ZDim, 6) = dProject_dPoseCam * dPoseCam_dInterpPose J.block(0, 0, ZDim, 6) =
* dInterpPose_dPoseBody1; // (2x6) * (6x6) * (6x6) dProject_dPoseCam * dPoseCam_dInterpPose *
J.block(0, 6, ZDim, 6) = dProject_dPoseCam * dPoseCam_dInterpPose dInterpPose_dPoseBody1; // (2x6) * (6x6) * (6x6)
* dInterpPose_dPoseBody2; // (2x6) * (6x6) * (6x6) J.block(0, 6, ZDim, 6) =
dProject_dPoseCam * dPoseCam_dInterpPose *
dInterpPose_dPoseBody2; // (2x6) * (6x6) * (6x6)
// fit into the output structures // fit into the output structures
Fs.push_back(J); Fs.push_back(J);
@ -338,37 +370,40 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
} }
/// linearize and return a Hessianfactor that is an approximation of error(p) /// linearize and return a Hessianfactor that is an approximation of error(p)
boost::shared_ptr<RegularHessianFactor<DimPose> > createHessianFactor( boost::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor(
const Values& values, const double lambda = 0.0, bool diagonalDamping = const Values& values, const double lambda = 0.0,
false) const { bool diagonalDamping = false) const {
// we may have multiple observation sharing the same keys (due to the
// we may have multiple observation sharing the same keys (due to the rolling shutter interpolation), // rolling shutter interpolation), hence the number of unique keys may be
// hence the number of unique keys may be smaller than 2 * nrMeasurements // smaller than 2 * nrMeasurements
size_t nrUniqueKeys = this->keys_.size(); // note: by construction, keys_ only contains unique keys size_t nrUniqueKeys =
this->keys_
.size(); // note: by construction, keys_ only contains unique keys
// Create structures for Hessian Factors // Create structures for Hessian Factors
KeyVector js; KeyVector js;
std::vector < Matrix > Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2); std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
std::vector < Vector > gs(nrUniqueKeys); std::vector<Vector> gs(nrUniqueKeys);
if (this->measured_.size() != this->cameras(values).size()) // 1 observation per interpolated camera if (this->measured_.size() !=
throw std::runtime_error("SmartProjectionPoseFactorRollingShutter: " this->cameras(values).size()) // 1 observation per interpolated camera
"measured_.size() inconsistent with input"); throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"measured_.size() inconsistent with input");
// triangulate 3D point at given linearization point // triangulate 3D point at given linearization point
this->triangulateSafe(this->cameras(values)); this->triangulateSafe(this->cameras(values));
if (!this->result_) { // failed: return "empty/zero" Hessian if (!this->result_) { // failed: return "empty/zero" Hessian
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) { if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {
for (Matrix& m : Gs) for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
m = Matrix::Zero(DimPose, DimPose); for (Vector& v : gs) v = Vector::Zero(DimPose);
for (Vector& v : gs) return boost::make_shared<RegularHessianFactor<DimPose>>(this->keys_,
v = Vector::Zero(DimPose); Gs, gs, 0.0);
return boost::make_shared < RegularHessianFactor<DimPose>
> (this->keys_, Gs, gs, 0.0);
} else { } else {
throw std::runtime_error("SmartProjectionPoseFactorRollingShutter: " throw std::runtime_error(
"only supported degeneracy mode is ZERO_ON_DEGENERACY"); "SmartProjectionPoseFactorRollingShutter: "
"only supported degeneracy mode is ZERO_ON_DEGENERACY");
} }
} }
// compute Jacobian given triangulated 3D Point // compute Jacobian given triangulated 3D Point
@ -384,21 +419,23 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
Matrix3 P = Cameras::PointCov(E, lambda, diagonalDamping); Matrix3 P = Cameras::PointCov(E, lambda, diagonalDamping);
// Collect all the key pairs: these are the keys that correspond to the blocks in Fs (on which we apply the Schur Complement) // Collect all the key pairs: these are the keys that correspond to the
// blocks in Fs (on which we apply the Schur Complement)
KeyVector nonuniqueKeys; KeyVector nonuniqueKeys;
for (size_t i = 0; i < world_P_body_key_pairs_.size(); i++) { for (size_t i = 0; i < world_P_body_key_pairs_.size(); i++) {
nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).first); nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).first);
nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).second); nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).second);
} }
// Build augmented Hessian (with last row/column being the information vector) // Build augmented Hessian (with last row/column being the information
// Note: we need to get the augumented hessian wrt the unique keys in key_ // vector) Note: we need to get the augumented hessian wrt the unique keys
// in key_
SymmetricBlockMatrix augmentedHessianUniqueKeys = SymmetricBlockMatrix augmentedHessianUniqueKeys =
Cameras::SchurComplementAndRearrangeBlocks_3_12_6( Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 12, 6>(
Fs, E, P, b, nonuniqueKeys, this->keys_); Fs, E, P, b, nonuniqueKeys, this->keys_);
return boost::make_shared < RegularHessianFactor<DimPose> return boost::make_shared<RegularHessianFactor<DimPose>>(
> (this->keys_, augmentedHessianUniqueKeys); this->keys_, augmentedHessianUniqueKeys);
} }
/** /**
@ -408,38 +445,38 @@ class SmartProjectionPoseFactorRollingShutter : public SmartProjectionFactor<CAM
*/ */
boost::shared_ptr<GaussianFactor> linearizeDamped( boost::shared_ptr<GaussianFactor> linearizeDamped(
const Values& values, const double lambda = 0.0) const { const Values& values, const double lambda = 0.0) const {
// depending on flag set on construction we may linearize to different linear factors // depending on flag set on construction we may linearize to different
// linear factors
switch (this->params_.linearizationMode) { switch (this->params_.linearizationMode) {
case HESSIAN: case HESSIAN:
return this->createHessianFactor(values, lambda); return this->createHessianFactor(values, lambda);
default: default:
throw std::runtime_error( throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: unknown linearization mode"); "SmartProjectionPoseFactorRollingShutter: unknown linearization "
"mode");
} }
} }
/// linearize /// linearize
boost::shared_ptr<GaussianFactor> linearize(const Values& values) const boost::shared_ptr<GaussianFactor> linearize(
override { const Values& values) const override {
return this->linearizeDamped(values); return this->linearizeDamped(values);
} }
private: private:
/// Serialization function /// Serialization function
friend class boost::serialization::access; friend class boost::serialization::access;
template<class ARCHIVE> template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) { void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(K_all_); ar& BOOST_SERIALIZATION_NVP(K_all_);
} }
}; };
// end of class declaration // end of class declaration
/// traits /// traits
template<class CAMERA> template <class CAMERA>
struct traits<SmartProjectionPoseFactorRollingShutter<CAMERA> > : struct traits<SmartProjectionPoseFactorRollingShutter<CAMERA>>
public Testable<SmartProjectionPoseFactorRollingShutter<CAMERA> > { : public Testable<SmartProjectionPoseFactorRollingShutter<CAMERA>> {};
};
} // namespace gtsam } // namespace gtsam

View File

@ -16,34 +16,33 @@
* @date July 2021 * @date July 2021
*/ */
#include <gtsam/base/numericalDerivative.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/TestableAssertions.h> #include <gtsam/base/TestableAssertions.h>
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h> #include <gtsam/base/numericalDerivative.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/geometry/Cal3DS2.h> #include <gtsam/geometry/Cal3DS2.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Point2.h> #include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Point3.h>
#include <CppUnitLite/TestHarness.h> #include <gtsam/geometry/Pose3.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>
using namespace std::placeholders; using namespace std::placeholders;
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
// make a realistic calibration matrix // make a realistic calibration matrix
static double fov = 60; // degrees static double fov = 60; // degrees
static size_t w=640,h=480; static size_t w = 640, h = 480;
static Cal3_S2::shared_ptr K(new Cal3_S2(fov,w,h)); static Cal3_S2::shared_ptr K(new Cal3_S2(fov, w, h));
// Create a noise model for the pixel error // Create a noise model for the pixel error
static SharedNoiseModel model(noiseModel::Unit::Create(2)); static SharedNoiseModel model(noiseModel::Unit::Create(2));
// Convenience for named keys // Convenience for named keys
using symbol_shorthand::X;
using symbol_shorthand::L; using symbol_shorthand::L;
using symbol_shorthand::T; using symbol_shorthand::T;
using symbol_shorthand::X;
// Convenience to define common variables across many tests // Convenience to define common variables across many tests
static Key poseKey1(X(1)); static Key poseKey1(X(1));
@ -51,74 +50,84 @@ static Key poseKey2(X(2));
static Key pointKey(L(1)); static Key pointKey(L(1));
static double interp_params = 0.5; static double interp_params = 0.5;
static Point2 measurement(323.0, 240.0); static Point2 measurement(323.0, 240.0);
static Pose3 body_P_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), Point3(0.25, -0.10, 1.0)); static Pose3 body_P_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2),
Point3(0.25, -0.10, 1.0));
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, Constructor) { TEST(ProjectionFactorRollingShutter, Constructor) {
ProjectionFactorRollingShutter factor(measurement, interp_params, model, poseKey1, poseKey2, pointKey, K);
}
/* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, ConstructorWithTransform) {
ProjectionFactorRollingShutter factor(measurement, interp_params, model, ProjectionFactorRollingShutter factor(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K, body_P_sensor); poseKey1, poseKey2, pointKey, K);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, Equals ) { TEST(ProjectionFactorRollingShutter, ConstructorWithTransform) {
{ // factors are equal ProjectionFactorRollingShutter factor(measurement, interp_params, model,
ProjectionFactorRollingShutter factor1(measurement, interp_params, poseKey1, poseKey2, pointKey, K,
model, poseKey1, poseKey2, pointKey, K); body_P_sensor);
ProjectionFactorRollingShutter factor2(measurement, interp_params, }
model, poseKey1, poseKey2, pointKey, K);
/* ************************************************************************* */
TEST(ProjectionFactorRollingShutter, Equals) {
{ // factors are equal
ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K);
ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K);
CHECK(assert_equal(factor1, factor2)); CHECK(assert_equal(factor1, factor2));
} }
{ // factors are NOT equal (keys are different) { // factors are NOT equal (keys are different)
ProjectionFactorRollingShutter factor1(measurement, interp_params, ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
model, poseKey1, poseKey2, pointKey, K); poseKey1, poseKey2, pointKey, K);
ProjectionFactorRollingShutter factor2(measurement, interp_params, ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
model, poseKey1, poseKey1, pointKey, K); poseKey1, poseKey1, pointKey, K);
CHECK(!assert_equal(factor1, factor2)); // not equal CHECK(!assert_equal(factor1, factor2)); // not equal
} }
{ // factors are NOT equal (different interpolation) { // factors are NOT equal (different interpolation)
ProjectionFactorRollingShutter factor1(measurement, 0.1, ProjectionFactorRollingShutter factor1(measurement, 0.1, model, poseKey1,
model, poseKey1, poseKey1, pointKey, K); poseKey1, pointKey, K);
ProjectionFactorRollingShutter factor2(measurement, 0.5, ProjectionFactorRollingShutter factor2(measurement, 0.5, model, poseKey1,
model, poseKey1, poseKey2, pointKey, K); poseKey2, pointKey, K);
CHECK(!assert_equal(factor1, factor2)); // not equal CHECK(!assert_equal(factor1, factor2)); // not equal
} }
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, EqualsWithTransform ) { TEST(ProjectionFactorRollingShutter, EqualsWithTransform) {
{ // factors are equal { // factors are equal
ProjectionFactorRollingShutter factor1(measurement, interp_params, model, ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K, body_P_sensor); poseKey1, poseKey2, pointKey, K,
body_P_sensor);
ProjectionFactorRollingShutter factor2(measurement, interp_params, model, ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K, body_P_sensor); poseKey1, poseKey2, pointKey, K,
body_P_sensor);
CHECK(assert_equal(factor1, factor2)); CHECK(assert_equal(factor1, factor2));
} }
{ // factors are NOT equal { // factors are NOT equal
ProjectionFactorRollingShutter factor1(measurement, interp_params, model, ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K, body_P_sensor); poseKey1, poseKey2, pointKey, K,
Pose3 body_P_sensor2(Rot3::RzRyRx(0.0, 0.0, 0.0), Point3(0.25, -0.10, 1.0)); // rotation different from body_P_sensor body_P_sensor);
Pose3 body_P_sensor2(
Rot3::RzRyRx(0.0, 0.0, 0.0),
Point3(0.25, -0.10, 1.0)); // rotation different from body_P_sensor
ProjectionFactorRollingShutter factor2(measurement, interp_params, model, ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
poseKey1, poseKey2, pointKey, K, body_P_sensor2); poseKey1, poseKey2, pointKey, K,
body_P_sensor2);
CHECK(!assert_equal(factor1, factor2)); CHECK(!assert_equal(factor1, factor2));
} }
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, Error ) { TEST(ProjectionFactorRollingShutter, Error) {
{ {
// Create the factor with a measurement that is 3 pixels off in x // Create the factor with a measurement that is 3 pixels off in x
// Camera pose corresponds to the first camera // Camera pose corresponds to the first camera
double t = 0.0; double t = 0.0;
ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1, poseKey2, pointKey, K); ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1,
poseKey2, pointKey, K);
// Set the linearization point // Set the linearization point
Pose3 pose1(Rot3(), Point3(0,0,-6)); Pose3 pose1(Rot3(), Point3(0, 0, -6));
Pose3 pose2(Rot3(), Point3(0,0,-4)); Pose3 pose2(Rot3(), Point3(0, 0, -4));
Point3 point(0.0, 0.0, 0.0); Point3 point(0.0, 0.0, 0.0);
// Use the factor to calculate the error // Use the factor to calculate the error
@ -134,11 +143,12 @@ TEST( ProjectionFactorRollingShutter, Error ) {
// Create the factor with a measurement that is 3 pixels off in x // Create the factor with a measurement that is 3 pixels off in x
// Camera pose is actually interpolated now // Camera pose is actually interpolated now
double t = 0.5; double t = 0.5;
ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1, poseKey2, pointKey, K); ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1,
poseKey2, pointKey, K);
// Set the linearization point // Set the linearization point
Pose3 pose1(Rot3(), Point3(0,0,-8)); Pose3 pose1(Rot3(), Point3(0, 0, -8));
Pose3 pose2(Rot3(), Point3(0,0,-4)); Pose3 pose2(Rot3(), Point3(0, 0, -4));
Point3 point(0.0, 0.0, 0.0); Point3 point(0.0, 0.0, 0.0);
// Use the factor to calculate the error // Use the factor to calculate the error
@ -153,15 +163,16 @@ TEST( ProjectionFactorRollingShutter, Error ) {
{ {
// Create measurement by projecting 3D landmark // Create measurement by projecting 3D landmark
double t = 0.3; double t = 0.3;
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0,0,0)); Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0,1)); Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t); Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
PinholeCamera<Cal3_S2> camera(poseInterp, *K); PinholeCamera<Cal3_S2> camera(poseInterp, *K);
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
Point2 measured = camera.project(point); Point2 measured = camera.project(point);
// create factor // create factor
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
poseKey2, pointKey, K);
// Use the factor to calculate the error // Use the factor to calculate the error
Vector actualError(factor.evaluateError(pose1, pose2, point)); Vector actualError(factor.evaluateError(pose1, pose2, point));
@ -175,19 +186,20 @@ TEST( ProjectionFactorRollingShutter, Error ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, ErrorWithTransform ) { TEST(ProjectionFactorRollingShutter, ErrorWithTransform) {
// Create measurement by projecting 3D landmark // Create measurement by projecting 3D landmark
double t = 0.3; double t = 0.3;
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0,0,0)); Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0,1)); Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t); Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0.2,0.1)); Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0.2, 0.1));
PinholeCamera<Cal3_S2> camera(poseInterp*body_P_sensor3, *K); PinholeCamera<Cal3_S2> camera(poseInterp * body_P_sensor3, *K);
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
Point2 measured = camera.project(point); Point2 measured = camera.project(point);
// create factor // create factor
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K, body_P_sensor3); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
pointKey, K, body_P_sensor3);
// Use the factor to calculate the error // Use the factor to calculate the error
Vector actualError(factor.evaluateError(pose1, pose2, point)); Vector actualError(factor.evaluateError(pose1, pose2, point));
@ -200,18 +212,19 @@ TEST( ProjectionFactorRollingShutter, ErrorWithTransform ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, Jacobian ) { TEST(ProjectionFactorRollingShutter, Jacobian) {
// Create measurement by projecting 3D landmark // Create measurement by projecting 3D landmark
double t = 0.3; double t = 0.3;
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0,0,0)); Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0,1)); Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t); Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
PinholeCamera<Cal3_S2> camera(poseInterp, *K); PinholeCamera<Cal3_S2> camera(poseInterp, *K);
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
Point2 measured = camera.project(point); Point2 measured = camera.project(point);
// create factor // create factor
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
pointKey, K);
// Use the factor to calculate the Jacobians // Use the factor to calculate the Jacobians
Matrix H1Actual, H2Actual, H3Actual; Matrix H1Actual, H2Actual, H3Actual;
@ -222,22 +235,25 @@ TEST( ProjectionFactorRollingShutter, Jacobian ) {
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>( Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>( Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
CHECK(assert_equal(H1Expected, H1Actual, 1e-5)); CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
CHECK(assert_equal(H2Expected, H2Actual, 1e-5)); CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
@ -245,19 +261,20 @@ TEST( ProjectionFactorRollingShutter, Jacobian ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, JacobianWithTransform ) { TEST(ProjectionFactorRollingShutter, JacobianWithTransform) {
// Create measurement by projecting 3D landmark // Create measurement by projecting 3D landmark
double t = 0.6; double t = 0.6;
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0,0,0)); Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0,1)); Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t); Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0.2,0.1)); Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0.2, 0.1));
PinholeCamera<Cal3_S2> camera(poseInterp*body_P_sensor3, *K); PinholeCamera<Cal3_S2> camera(poseInterp * body_P_sensor3, *K);
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
Point2 measured = camera.project(point); Point2 measured = camera.project(point);
// create factor // create factor
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K, body_P_sensor3); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
pointKey, K, body_P_sensor3);
// Use the factor to calculate the Jacobians // Use the factor to calculate the Jacobians
Matrix H1Actual, H2Actual, H3Actual; Matrix H1Actual, H2Actual, H3Actual;
@ -268,22 +285,25 @@ TEST( ProjectionFactorRollingShutter, JacobianWithTransform ) {
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>( Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>( Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
CHECK(assert_equal(H1Expected, H1Actual, 1e-5)); CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
CHECK(assert_equal(H2Expected, H2Actual, 1e-5)); CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
@ -291,41 +311,48 @@ TEST( ProjectionFactorRollingShutter, JacobianWithTransform ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( ProjectionFactorRollingShutter, cheirality ) { TEST(ProjectionFactorRollingShutter, cheirality) {
// Create measurement by projecting 3D landmark behind camera // Create measurement by projecting 3D landmark behind camera
double t = 0.3; double t = 0.3;
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0,0,0)); Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0,0,1)); Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t); Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
PinholeCamera<Cal3_S2> camera(poseInterp, *K); PinholeCamera<Cal3_S2> camera(poseInterp, *K);
Point3 point(0.0, 0.0, -5.0); // 5 meters behind the camera Point3 point(0.0, 0.0, -5.0); // 5 meters behind the camera
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
Point2 measured = Point2(0.0,0.0); // project would throw an exception Point2 measured = Point2(0.0, 0.0); // project would throw an exception
{ // check that exception is thrown if we set throwCheirality = true { // check that exception is thrown if we set throwCheirality = true
bool throwCheirality = true; bool throwCheirality = true;
bool verboseCheirality = true; bool verboseCheirality = true;
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K, throwCheirality, verboseCheirality); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
poseKey2, pointKey, K,
throwCheirality, verboseCheirality);
CHECK_EXCEPTION(factor.evaluateError(pose1, pose2, point), CHECK_EXCEPTION(factor.evaluateError(pose1, pose2, point),
CheiralityException); CheiralityException);
} }
{ // check that exception is NOT thrown if we set throwCheirality = false, and outputs are correct { // check that exception is NOT thrown if we set throwCheirality = false,
bool throwCheirality = false; // default // and outputs are correct
bool verboseCheirality = false; // default bool throwCheirality = false; // default
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K, throwCheirality, verboseCheirality); bool verboseCheirality = false; // default
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
poseKey2, pointKey, K,
throwCheirality, verboseCheirality);
// Use the factor to calculate the error // Use the factor to calculate the error
Matrix H1Actual, H2Actual, H3Actual; Matrix H1Actual, H2Actual, H3Actual;
Vector actualError(factor.evaluateError(pose1, pose2, point, H1Actual, H2Actual, H3Actual)); Vector actualError(factor.evaluateError(pose1, pose2, point, H1Actual,
H2Actual, H3Actual));
// The expected error is zero // The expected error is zero
Vector expectedError = Vector2::Constant(2.0 * K->fx()); // this is what we return when point is behind camera Vector expectedError = Vector2::Constant(
2.0 * K->fx()); // this is what we return when point is behind camera
// Verify we get the expected error // Verify we get the expected error
CHECK(assert_equal(expectedError, actualError, 1e-9)); CHECK(assert_equal(expectedError, actualError, 1e-9));
CHECK(assert_equal(Matrix::Zero(2,6), H1Actual, 1e-5)); CHECK(assert_equal(Matrix::Zero(2, 6), H1Actual, 1e-5));
CHECK(assert_equal(Matrix::Zero(2,6), H2Actual, 1e-5)); CHECK(assert_equal(Matrix::Zero(2, 6), H2Actual, 1e-5));
CHECK(assert_equal(Matrix::Zero(2,3), H3Actual, 1e-5)); CHECK(assert_equal(Matrix::Zero(2, 3), H3Actual, 1e-5));
} }
#else #else
{ {
@ -333,7 +360,8 @@ TEST( ProjectionFactorRollingShutter, cheirality ) {
Point2 measured = camera.project(point); Point2 measured = camera.project(point);
// create factor // create factor
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2, pointKey, K); ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
poseKey2, pointKey, K);
// Use the factor to calculate the Jacobians // Use the factor to calculate the Jacobians
Matrix H1Actual, H2Actual, H3Actual; Matrix H1Actual, H2Actual, H3Actual;
@ -344,22 +372,25 @@ TEST( ProjectionFactorRollingShutter, cheirality ) {
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>( Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>( Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>( std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor, std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, boost::none, boost::none, boost::none)), std::placeholders::_3, boost::none, boost::none,
pose1, pose2, point); boost::none)),
pose1, pose2, point);
CHECK(assert_equal(H1Expected, H1Actual, 1e-5)); CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
CHECK(assert_equal(H2Expected, H2Actual, 1e-5)); CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
@ -368,8 +399,9 @@ TEST( ProjectionFactorRollingShutter, cheirality ) {
#endif #endif
} }
/* ************************************************************************* */ /* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); } int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */ /* ************************************************************************* */

View File

@ -43,6 +43,7 @@ set(ignore
gtsam::BetweenFactorPose2s gtsam::BetweenFactorPose2s
gtsam::BetweenFactorPose3s gtsam::BetweenFactorPose3s
gtsam::Point2Vector gtsam::Point2Vector
gtsam::Point2Pairs
gtsam::Point3Pairs gtsam::Point3Pairs
gtsam::Pose3Pairs gtsam::Pose3Pairs
gtsam::Pose3Vector gtsam::Pose3Vector
@ -61,10 +62,13 @@ set(interface_headers
${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i ${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i
${PROJECT_SOURCE_DIR}/gtsam/sfm/sfm.i ${PROJECT_SOURCE_DIR}/gtsam/sfm/sfm.i
${PROJECT_SOURCE_DIR}/gtsam/navigation/navigation.i ${PROJECT_SOURCE_DIR}/gtsam/navigation/navigation.i
${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i
) )
set(GTSAM_PYTHON_TARGET gtsam_py)
set(GTSAM_PYTHON_UNSTABLE_TARGET gtsam_unstable_py)
pybind_wrap(gtsam_py # target pybind_wrap(${GTSAM_PYTHON_TARGET} # target
"${interface_headers}" # interface_headers "${interface_headers}" # interface_headers
"gtsam.cpp" # generated_cpp "gtsam.cpp" # generated_cpp
"gtsam" # module_name "gtsam" # module_name
@ -76,7 +80,7 @@ pybind_wrap(gtsam_py # target
ON # use_boost ON # use_boost
) )
set_target_properties(gtsam_py PROPERTIES set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
INSTALL_RPATH_USE_LINK_PATH TRUE INSTALL_RPATH_USE_LINK_PATH TRUE
OUTPUT_NAME "gtsam" OUTPUT_NAME "gtsam"
@ -88,7 +92,7 @@ set_target_properties(gtsam_py PROPERTIES
set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)
# Symlink all tests .py files to build folder. # Symlink all tests .py files to build folder.
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
"${GTSAM_MODULE_PATH}") "${GTSAM_MODULE_PATH}")
# Common directory for data/datasets stored with the package. # Common directory for data/datasets stored with the package.
@ -96,7 +100,7 @@ create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}") file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")
# Add gtsam as a dependency to the install target # Add gtsam as a dependency to the install target
set(GTSAM_PYTHON_DEPENDENCIES gtsam_py) set(GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_TARGET})
if(GTSAM_UNSTABLE_BUILD_PYTHON) if(GTSAM_UNSTABLE_BUILD_PYTHON)
@ -120,7 +124,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
gtsam::CameraSetCal3Fisheye gtsam::CameraSetCal3Fisheye
gtsam::KeyPairDoubleMap) gtsam::KeyPairDoubleMap)
pybind_wrap(gtsam_unstable_py # target pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target
${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
"gtsam_unstable.cpp" # generated_cpp "gtsam_unstable.cpp" # generated_cpp
"gtsam_unstable" # module_name "gtsam_unstable" # module_name
@ -132,7 +136,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
ON # use_boost ON # use_boost
) )
set_target_properties(gtsam_unstable_py PROPERTIES set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES
INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
INSTALL_RPATH_USE_LINK_PATH TRUE INSTALL_RPATH_USE_LINK_PATH TRUE
OUTPUT_NAME "gtsam_unstable" OUTPUT_NAME "gtsam_unstable"
@ -144,11 +148,11 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)
# Symlink all tests .py files to build folder. # Symlink all tests .py files to build folder.
create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
"${GTSAM_UNSTABLE_MODULE_PATH}") "${GTSAM_UNSTABLE_MODULE_PATH}")
# Add gtsam_unstable to the install target # Add gtsam_unstable to the install target
list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py) list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})
endif() endif()
@ -165,5 +169,6 @@ add_custom_target(
COMMAND COMMAND
${CMAKE_COMMAND} -E env # add package to python path so no need to install ${CMAKE_COMMAND} -E env # add package to python path so no need to install
"PYTHONPATH=${GTSAM_PYTHON_BUILD_DIRECTORY}/$ENV{PYTHONPATH}" "PYTHONPATH=${GTSAM_PYTHON_BUILD_DIRECTORY}/$ENV{PYTHONPATH}"
${PYTHON_EXECUTABLE} -m unittest discover -s "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/tests" ${PYTHON_EXECUTABLE} -m unittest discover -v -s .
DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}) DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
WORKING_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/tests")

View File

@ -1,6 +1,12 @@
from . import utils """Module definition file for GTSAM"""
from .gtsam import *
from .utils import findExampleDataFile # pylint: disable=import-outside-toplevel, global-variable-not-assigned, possibly-unused-variable, import-error, import-self
import sys
from gtsam import gtsam, utils
from gtsam.gtsam import *
from gtsam.utils import findExampleDataFile
def _init(): def _init():
@ -13,7 +19,7 @@ def _init():
def Point2(x=np.nan, y=np.nan): def Point2(x=np.nan, y=np.nan):
"""Shim for the deleted Point2 type.""" """Shim for the deleted Point2 type."""
if isinstance(x, np.ndarray): if isinstance(x, np.ndarray):
assert x.shape == (2,), "Point2 takes 2-vector" assert x.shape == (2, ), "Point2 takes 2-vector"
return x # "copy constructor" return x # "copy constructor"
return np.array([x, y], dtype=float) return np.array([x, y], dtype=float)
@ -22,7 +28,7 @@ def _init():
def Point3(x=np.nan, y=np.nan, z=np.nan): def Point3(x=np.nan, y=np.nan, z=np.nan):
"""Shim for the deleted Point3 type.""" """Shim for the deleted Point3 type."""
if isinstance(x, np.ndarray): if isinstance(x, np.ndarray):
assert x.shape == (3,), "Point3 takes 3-vector" assert x.shape == (3, ), "Point3 takes 3-vector"
return x # "copy constructor" return x # "copy constructor"
return np.array([x, y, z], dtype=float) return np.array([x, y, z], dtype=float)

View File

@ -9,15 +9,17 @@ CustomFactor demo that simulates a 1-D sensor fusion task.
Author: Fan Jiang, Frank Dellaert Author: Fan Jiang, Frank Dellaert
""" """
from functools import partial
from typing import List, Optional
import gtsam import gtsam
import numpy as np import numpy as np
from typing import List, Optional I = np.eye(1)
from functools import partial
def simulate_car(): def simulate_car() -> List[float]:
# Simulate a car for one second """Simulate a car for one second"""
x0 = 0 x0 = 0
dt = 0.25 # 4 Hz, typical GPS dt = 0.25 # 4 Hz, typical GPS
v = 144 * 1000 / 3600 # 144 km/hour = 90mph, pretty fast v = 144 * 1000 / 3600 # 144 km/hour = 90mph, pretty fast
@ -26,46 +28,9 @@ def simulate_car():
return x return x
x = simulate_car() def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
print(f"Simulated car trajectory: {x}") values: gtsam.Values,
jacobians: Optional[List[np.ndarray]]) -> float:
# %%
add_noise = True # set this to False to run with "perfect" measurements
# GPS measurements
sigma_gps = 3.0 # assume GPS is +/- 3m
g = [x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0)
for k in range(5)]
# Odometry measurements
sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz
o = [x[k + 1] - x[k] + (np.random.normal(scale=sigma_odo) if add_noise else 0)
for k in range(4)]
# Landmark measurements:
sigma_lm = 1 # assume landmark measurement is accurate up to 1m
# Assume first landmark is at x=5, we measure it at time k=0
lm_0 = 5.0
z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
# Assume other landmark is at x=28, we measure it at time k=3
lm_3 = 28.0
z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
unknown = [gtsam.symbol('x', k) for k in range(5)]
print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown)))
# We now can use nonlinear factor graphs
factor_graph = gtsam.NonlinearFactorGraph()
# Add factors for GPS measurements
I = np.eye(1)
gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps)
def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
"""GPS Factor error function """GPS Factor error function
:param measurement: GPS measurement, to be filled with `partial` :param measurement: GPS measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle :param this: gtsam.CustomFactor handle
@ -82,36 +47,9 @@ def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobia
return error return error
# Add the GPS factors def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,
for k in range(5): values: gtsam.Values,
gf = gtsam.CustomFactor(gps_model, [unknown[k]], partial(error_gps, np.array([g[k]]))) jacobians: Optional[List[np.ndarray]]) -> float:
factor_graph.add(gf)
# New Values container
v = gtsam.Values()
# Add initial estimates to the Values container
for i in range(5):
v.insert(unknown[i], np.array([0.0]))
# Initialize optimizer
params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
# Optimize the factor graph
result = optimizer.optimize()
# calculate the error from ground truth
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)])
print("Result with only GPS")
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}")
# Adding odometry will improve things a lot
odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo)
def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
"""Odometry Factor error function """Odometry Factor error function
:param measurement: Odometry measurement, to be filled with `partial` :param measurement: Odometry measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle :param this: gtsam.CustomFactor handle
@ -130,25 +68,9 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobi
return error return error
for k in range(4): def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor,
odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]], partial(error_odom, np.array([o[k]]))) values: gtsam.Values,
factor_graph.add(odof) jacobians: Optional[List[np.ndarray]]) -> float:
params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
result = optimizer.optimize()
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)])
print("Result with GPS+Odometry")
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}")
# This is great, but GPS noise is still apparent, so now we add the two landmarks
lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm)
def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobians: Optional[List[np.ndarray]]):
"""Landmark Factor error function """Landmark Factor error function
:param measurement: Landmark measurement, to be filled with `partial` :param measurement: Landmark measurement, to be filled with `partial`
:param this: gtsam.CustomFactor handle :param this: gtsam.CustomFactor handle
@ -165,15 +87,120 @@ def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor, values, jacobian
return error return error
factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[0]], partial(error_lm, np.array([lm_0 + z_0])))) def main():
factor_graph.add(gtsam.CustomFactor(lm_model, [unknown[3]], partial(error_lm, np.array([lm_3 + z_3])))) """Main runner."""
params = gtsam.GaussNewtonParams() x = simulate_car()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params) print(f"Simulated car trajectory: {x}")
result = optimizer.optimize() add_noise = True # set this to False to run with "perfect" measurements
error = np.array([(result.atVector(unknown[k]) - x[k])[0] for k in range(5)]) # GPS measurements
sigma_gps = 3.0 # assume GPS is +/- 3m
g = [
x[k] + (np.random.normal(scale=sigma_gps) if add_noise else 0)
for k in range(5)
]
print("Result with GPS+Odometry+Landmark") # Odometry measurements
print(result, np.round(error, 2), f"\nJ(X)={0.5 * np.sum(np.square(error))}") sigma_odo = 0.1 # assume Odometry is 10cm accurate at 4Hz
o = [
x[k + 1] - x[k] +
(np.random.normal(scale=sigma_odo) if add_noise else 0)
for k in range(4)
]
# Landmark measurements:
sigma_lm = 1 # assume landmark measurement is accurate up to 1m
# Assume first landmark is at x=5, we measure it at time k=0
lm_0 = 5.0
z_0 = x[0] - lm_0 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
# Assume other landmark is at x=28, we measure it at time k=3
lm_3 = 28.0
z_3 = x[3] - lm_3 + (np.random.normal(scale=sigma_lm) if add_noise else 0)
unknown = [gtsam.symbol('x', k) for k in range(5)]
print("unknowns = ", list(map(gtsam.DefaultKeyFormatter, unknown)))
# We now can use nonlinear factor graphs
factor_graph = gtsam.NonlinearFactorGraph()
# Add factors for GPS measurements
gps_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_gps)
# Add the GPS factors
for k in range(5):
gf = gtsam.CustomFactor(gps_model, [unknown[k]],
partial(error_gps, np.array([g[k]])))
factor_graph.add(gf)
# New Values container
v = gtsam.Values()
# Add initial estimates to the Values container
for i in range(5):
v.insert(unknown[i], np.array([0.0]))
# Initialize optimizer
params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
# Optimize the factor graph
result = optimizer.optimize()
# calculate the error from ground truth
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
for k in range(5)])
print("Result with only GPS")
print(result, np.round(error, 2),
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
# Adding odometry will improve things a lot
odo_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_odo)
for k in range(4):
odof = gtsam.CustomFactor(odo_model, [unknown[k], unknown[k + 1]],
partial(error_odom, np.array([o[k]])))
factor_graph.add(odof)
params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
result = optimizer.optimize()
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
for k in range(5)])
print("Result with GPS+Odometry")
print(result, np.round(error, 2),
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
# This is great, but GPS noise is still apparent, so now we add the two landmarks
lm_model = gtsam.noiseModel.Isotropic.Sigma(1, sigma_lm)
factor_graph.add(
gtsam.CustomFactor(lm_model, [unknown[0]],
partial(error_lm, np.array([lm_0 + z_0]))))
factor_graph.add(
gtsam.CustomFactor(lm_model, [unknown[3]],
partial(error_lm, np.array([lm_3 + z_3]))))
params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(factor_graph, v, params)
result = optimizer.optimize()
error = np.array([(result.atVector(unknown[k]) - x[k])[0]
for k in range(5)])
print("Result with GPS+Odometry+Landmark")
print(result, np.round(error, 2),
f"\nJ(X)={0.5 * np.sum(np.square(error))}")
if __name__ == "__main__":
main()

View File

@ -13,13 +13,8 @@ Author: Mandy Xie
from __future__ import print_function from __future__ import print_function
import numpy as np
import gtsam import gtsam
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot
# ENU Origin is where the plane was in hold next to runway # ENU Origin is where the plane was in hold next to runway
lat0 = 33.86998 lat0 = 33.86998
lon0 = -84.30626 lon0 = -84.30626
@ -29,28 +24,34 @@ h0 = 274
GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25) PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25)
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
# Add a prior on the first point, setting it to the origin def main():
# A prior factor consists of a mean and a noise model (covariance matrix) """Main runner."""
priorMean = gtsam.Pose3() # prior at origin # Create an empty nonlinear factor graph
graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE)) graph = gtsam.NonlinearFactorGraph()
# Add GPS factors # Add a prior on the first point, setting it to the origin
gps = gtsam.Point3(lat0, lon0, h0) # A prior factor consists of a mean and a noise model (covariance matrix)
graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE)) priorMean = gtsam.Pose3() # prior at origin
print("\nFactor Graph:\n{}".format(graph)) graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
# Create the data structure to hold the initialEstimate estimate to the solution # Add GPS factors
# For illustrative purposes, these have been deliberately set to incorrect values gps = gtsam.Point3(lat0, lon0, h0)
initial = gtsam.Values() graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
initial.insert(1, gtsam.Pose3()) print("\nFactor Graph:\n{}".format(graph))
print("\nInitial Estimate:\n{}".format(initial))
# optimize using Levenberg-Marquardt optimization # Create the data structure to hold the initialEstimate estimate to the solution
params = gtsam.LevenbergMarquardtParams() # For illustrative purposes, these have been deliberately set to incorrect values
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) initial = gtsam.Values()
result = optimizer.optimize() initial.insert(1, gtsam.Pose3())
print("\nFinal Result:\n{}".format(result)) print("\nInitial Estimate:\n{}".format(initial))
# optimize using Levenberg-Marquardt optimization
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
if __name__ == "__main__":
main()

View File

@ -0,0 +1,366 @@
"""
Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
Author: Varun Agrawal
"""
import argparse
from typing import List, Tuple
import gtsam
import numpy as np
from gtsam import ISAM2, Pose3, noiseModel
from gtsam.symbol_shorthand import B, V, X
GRAVITY = 9.8
class KittiCalibration:
"""Class to hold KITTI calibration info."""
def __init__(self, body_ptx: float, body_pty: float, body_ptz: float,
body_prx: float, body_pry: float, body_prz: float,
accelerometer_sigma: float, gyroscope_sigma: float,
integration_sigma: float, accelerometer_bias_sigma: float,
gyroscope_bias_sigma: float, average_delta_t: float):
self.bodyTimu = Pose3(gtsam.Rot3.RzRyRx(body_prx, body_pry, body_prz),
gtsam.Point3(body_ptx, body_pty, body_ptz))
self.accelerometer_sigma = accelerometer_sigma
self.gyroscope_sigma = gyroscope_sigma
self.integration_sigma = integration_sigma
self.accelerometer_bias_sigma = accelerometer_bias_sigma
self.gyroscope_bias_sigma = gyroscope_bias_sigma
self.average_delta_t = average_delta_t
class ImuMeasurement:
"""An instance of an IMU measurement."""
def __init__(self, time: float, dt: float, accelerometer: gtsam.Point3,
gyroscope: gtsam.Point3):
self.time = time
self.dt = dt
self.accelerometer = accelerometer
self.gyroscope = gyroscope
class GpsMeasurement:
"""An instance of a GPS measurement."""
def __init__(self, time: float, position: gtsam.Point3):
self.time = time
self.position = position
def loadImuData(imu_data_file: str) -> List[ImuMeasurement]:
"""Helper to load the IMU data."""
# Read IMU data
# Time dt accelX accelY accelZ omegaX omegaY omegaZ
imu_data_file = gtsam.findExampleDataFile(imu_data_file)
imu_measurements = []
print("-- Reading IMU measurements from file")
with open(imu_data_file, encoding='UTF-8') as imu_data:
data = imu_data.readlines()
for i in range(1, len(data)): # ignore the first line
time, dt, acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z = map(
float, data[i].split(' '))
imu_measurement = ImuMeasurement(
time, dt, np.asarray([acc_x, acc_y, acc_z]),
np.asarray([gyro_x, gyro_y, gyro_z]))
imu_measurements.append(imu_measurement)
return imu_measurements
def loadGpsData(gps_data_file: str) -> List[GpsMeasurement]:
"""Helper to load the GPS data."""
# Read GPS data
# Time,X,Y,Z
gps_data_file = gtsam.findExampleDataFile(gps_data_file)
gps_measurements = []
print("-- Reading GPS measurements from file")
with open(gps_data_file, encoding='UTF-8') as gps_data:
data = gps_data.readlines()
for i in range(1, len(data)):
time, x, y, z = map(float, data[i].split(','))
gps_measurement = GpsMeasurement(time, np.asarray([x, y, z]))
gps_measurements.append(gps_measurement)
return gps_measurements
def loadKittiData(
imu_data_file: str = "KittiEquivBiasedImu.txt",
gps_data_file: str = "KittiGps_converted.txt",
imu_metadata_file: str = "KittiEquivBiasedImu_metadata.txt"
) -> Tuple[KittiCalibration, List[ImuMeasurement], List[GpsMeasurement]]:
"""
Load the KITTI Dataset.
"""
# Read IMU metadata and compute relative sensor pose transforms
# BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
# GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
# AverageDeltaT
imu_metadata_file = gtsam.findExampleDataFile(imu_metadata_file)
with open(imu_metadata_file, encoding='UTF-8') as imu_metadata:
print("-- Reading sensor metadata")
line = imu_metadata.readline() # Ignore the first line
line = imu_metadata.readline().strip()
data = list(map(float, line.split(' ')))
kitti_calibration = KittiCalibration(*data)
print("IMU metadata:", data)
imu_measurements = loadImuData(imu_data_file)
gps_measurements = loadGpsData(gps_data_file)
return kitti_calibration, imu_measurements, gps_measurements
def getImuParams(kitti_calibration: KittiCalibration):
"""Get the IMU parameters from the KITTI calibration data."""
w_coriolis = np.zeros(3)
# Set IMU preintegration parameters
measured_acc_cov = np.eye(3) * np.power(
kitti_calibration.accelerometer_sigma, 2)
measured_omega_cov = np.eye(3) * np.power(
kitti_calibration.gyroscope_sigma, 2)
# error committed in integrating position from velocities
integration_error_cov = np.eye(3) * np.power(
kitti_calibration.integration_sigma, 2)
imu_params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
# acc white noise in continuous
imu_params.setAccelerometerCovariance(measured_acc_cov)
# integration uncertainty continuous
imu_params.setIntegrationCovariance(integration_error_cov)
# gyro white noise in continuous
imu_params.setGyroscopeCovariance(measured_omega_cov)
imu_params.setOmegaCoriolis(w_coriolis)
return imu_params
def save_results(isam: gtsam.ISAM2, output_filename: str, first_gps_pose: int,
gps_measurements: List[GpsMeasurement]):
"""Write the results from `isam` to `output_filename`."""
# Save results to file
print("Writing results to file...")
with open(output_filename, 'w', encoding='UTF-8') as fp_out:
fp_out.write(
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n")
result = isam.calculateEstimate()
for i in range(first_gps_pose, len(gps_measurements)):
pose_key = X(i)
vel_key = V(i)
bias_key = B(i)
pose = result.atPose3(pose_key)
velocity = result.atVector(vel_key)
bias = result.atConstantBias(bias_key)
pose_quat = pose.rotation().toQuaternion()
gps = gps_measurements[i].position
print(f"State at #{i}")
print(f"Pose:\n{pose}")
print(f"Velocity:\n{velocity}")
print(f"Bias:\n{bias}")
fp_out.write("{},{},{},{},{},{},{},{},{},{},{}\n".format(
gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
gps[0], gps[1], gps[2]))
def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--output_filename",
default="IMUKittiExampleGPSResults.csv")
return parser.parse_args()
def optimize(gps_measurements: List[GpsMeasurement],
             imu_measurements: List[ImuMeasurement],
             sigma_init_x: gtsam.noiseModel.Diagonal,
             sigma_init_v: gtsam.noiseModel.Diagonal,
             sigma_init_b: gtsam.noiseModel.Diagonal,
             noise_model_gps: gtsam.noiseModel.Diagonal,
             kitti_calibration: KittiCalibration, first_gps_pose: int,
             gps_skip: int) -> gtsam.ISAM2:
    """Run ISAM2 optimization on the measurements.

    Builds a factor graph incrementally: one navigation state (pose, velocity,
    IMU bias) per GPS measurement, IMU preintegration factors between
    consecutive states, bias random-walk factors, and a GPS position prior on
    every `gps_skip`-th pose.

    Args:
        gps_measurements: GPS position readings, one candidate state each.
        imu_measurements: IMU samples covering the whole trajectory.
        sigma_init_x: Noise model for the prior on the first pose.
        sigma_init_v: Noise model for the prior on the first velocity.
        sigma_init_b: Noise model for the prior on the first IMU bias.
        noise_model_gps: Noise model for each GPS position factor.
        kitti_calibration: KITTI sensor calibration used for IMU parameters.
        first_gps_pose: Index of the first GPS measurement to use.
        gps_skip: Only every `gps_skip`-th GPS measurement adds a factor.

    Returns:
        The ISAM2 solver after all measurements have been incorporated.
    """
    # Set initial conditions for the estimated trajectory
    # initial pose is the reference frame (navigation frame)
    current_pose_global = Pose3(gtsam.Rot3(),
                                gps_measurements[first_gps_pose].position)

    # the vehicle is stationary at the beginning at position 0,0,0
    current_velocity_global = np.zeros(3)
    current_bias = gtsam.imuBias.ConstantBias()  # init with zero bias

    imu_params = getImuParams(kitti_calibration)

    # Set ISAM2 parameters and create ISAM2 solver object
    isam_params = gtsam.ISAM2Params()
    isam_params.setFactorization("CHOLESKY")
    isam_params.setRelinearizeSkip(10)
    isam = gtsam.ISAM2(isam_params)

    # Create the factor graph and values object that will store new factors and
    # values to add to the incremental graph
    new_factors = gtsam.NonlinearFactorGraph()
    # values storing the initial estimates of new nodes in the factor graph
    new_values = gtsam.Values()

    # Main loop:
    # (1) we read the measurements
    # (2) we create the corresponding factors in the graph
    # (3) we solve the graph to obtain an optimal estimate of robot trajectory
    print("-- Starting main loop: inference is performed at each time step, "
          "but we plot trajectory every 10 steps")

    # j walks monotonically through imu_measurements across all poses.
    j = 0
    included_imu_measurement_count = 0

    for i in range(first_gps_pose, len(gps_measurements)):
        # At each non-IMU measurement we initialize a new node in the graph
        current_pose_key = X(i)
        current_vel_key = V(i)
        current_bias_key = B(i)
        t = gps_measurements[i].time

        if i == first_gps_pose:
            # Create initial estimate and prior on initial pose, velocity, and biases
            new_values.insert(current_pose_key, current_pose_global)
            new_values.insert(current_vel_key, current_velocity_global)
            new_values.insert(current_bias_key, current_bias)

            new_factors.addPriorPose3(current_pose_key, current_pose_global,
                                      sigma_init_x)
            new_factors.addPriorVector(current_vel_key,
                                       current_velocity_global, sigma_init_v)
            new_factors.addPriorConstantBias(current_bias_key, current_bias,
                                             sigma_init_b)
        else:
            t_previous = gps_measurements[i - 1].time

            # Summarize IMU data between the previous GPS measurement and now
            current_summarized_measurement = gtsam.PreintegratedImuMeasurements(
                imu_params, current_bias)

            # Integrate every IMU sample whose timestamp falls in
            # (t_previous, t]; j is not reset, so each sample is used once.
            while (j < len(imu_measurements)
                   and imu_measurements[j].time <= t):
                if imu_measurements[j].time >= t_previous:
                    current_summarized_measurement.integrateMeasurement(
                        imu_measurements[j].accelerometer,
                        imu_measurements[j].gyroscope, imu_measurements[j].dt)
                    included_imu_measurement_count += 1
                j += 1

            # Create IMU factor
            previous_pose_key = X(i - 1)
            previous_vel_key = V(i - 1)
            previous_bias_key = B(i - 1)

            new_factors.push_back(
                gtsam.ImuFactor(previous_pose_key, previous_vel_key,
                                current_pose_key, current_vel_key,
                                previous_bias_key,
                                current_summarized_measurement))

            # Bias evolution as given in the IMU metadata; the random-walk
            # sigma grows with sqrt of the number of integrated IMU samples.
            sigma_between_b = gtsam.noiseModel.Diagonal.Sigmas(
                np.asarray([
                    np.sqrt(included_imu_measurement_count) *
                    kitti_calibration.accelerometer_bias_sigma
                ] * 3 + [
                    np.sqrt(included_imu_measurement_count) *
                    kitti_calibration.gyroscope_bias_sigma
                ] * 3))
            new_factors.push_back(
                gtsam.BetweenFactorConstantBias(previous_bias_key,
                                                current_bias_key,
                                                gtsam.imuBias.ConstantBias(),
                                                sigma_between_b))

            # Create GPS factor: GPS gives position only, so reuse the current
            # rotation estimate for the pose prior.
            gps_pose = Pose3(current_pose_global.rotation(),
                             gps_measurements[i].position)
            if (i % gps_skip) == 0:
                new_factors.addPriorPose3(current_pose_key, gps_pose,
                                          noise_model_gps)
                new_values.insert(current_pose_key, gps_pose)

                print(f"############ POSE INCLUDED AT TIME {t} ############")
                print(gps_pose.translation(), "\n")
            else:
                new_values.insert(current_pose_key, current_pose_global)

            # Add initial values for velocity and bias based on the previous
            # estimates
            new_values.insert(current_vel_key, current_velocity_global)
            new_values.insert(current_bias_key, current_bias)

        # Update solver
        # =======================================================================
        # We accumulate 2*GPSskip GPS measurements before updating the solver at
        # first so that the heading becomes observable.
        if i > (first_gps_pose + 2 * gps_skip):
            print(f"############ NEW FACTORS AT TIME {t:.6f} ############")
            new_factors.print()

            isam.update(new_factors, new_values)

            # Reset the newFactors and newValues list
            new_factors.resize(0)
            new_values.clear()

            # Extract the result/current estimates
            result = isam.calculateEstimate()

            current_pose_global = result.atPose3(current_pose_key)
            current_velocity_global = result.atVector(current_vel_key)
            current_bias = result.atConstantBias(current_bias_key)

            print(f"############ POSE AT TIME {t} ############")
            current_pose_global.print()
            print("\n")

    return isam
def main():
    """Main runner."""
    args = parse_args()
    calibration, imu_data, gps_data = loadKittiData()

    # The example assumes the IMU frame coincides with the body frame.
    if not calibration.bodyTimu.equals(Pose3(), 1e-8):
        raise ValueError(
            "Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"
        )

    # Configure different variables
    first_gps_pose = 1
    gps_skip = 10

    # Configure noise models
    gps_noise = noiseModel.Diagonal.Precisions(
        np.asarray([0, 0, 0] + [1.0 / 0.07] * 3))
    pose_prior_noise = noiseModel.Diagonal.Precisions(
        np.asarray([0, 0, 0, 1, 1, 1]))
    vel_prior_noise = noiseModel.Diagonal.Sigmas(1000.0 * np.ones(3))
    bias_prior_noise = noiseModel.Diagonal.Sigmas(
        np.asarray([0.1] * 3 + [5.00e-05] * 3))

    isam = optimize(gps_data, imu_data, pose_prior_noise, vel_prior_noise,
                    bias_prior_noise, gps_noise, calibration, first_gps_pose,
                    gps_skip)
    save_results(isam, args.output_filename, first_gps_pose, gps_data)
# Run the KITTI IMU+GPS example when executed as a script.
if __name__ == "__main__":
    main()

View File

@ -10,6 +10,8 @@ A script validating and demonstrating the ImuFactor inference.
Author: Frank Dellaert, Varun Agrawal Author: Frank Dellaert, Varun Agrawal
""" """
# pylint: disable=no-name-in-module,unused-import,arguments-differ,import-error,wrong-import-order
from __future__ import print_function from __future__ import print_function
import argparse import argparse
@ -25,14 +27,34 @@ from mpl_toolkits.mplot3d import Axes3D
from PreintegrationExample import POSES_FIG, PreintegrationExample from PreintegrationExample import POSES_FIG, PreintegrationExample
BIAS_KEY = B(0) BIAS_KEY = B(0)
GRAVITY = 9.81
np.set_printoptions(precision=3, suppress=True) np.set_printoptions(precision=3, suppress=True)
class ImuFactorExample(PreintegrationExample): def parse_args() -> argparse.Namespace:
"""Parse command line arguments."""
parser = argparse.ArgumentParser("ImuFactorExample.py")
parser.add_argument("--twist_scenario",
default="sick_twist",
choices=("zero_twist", "forward_twist", "loop_twist",
"sick_twist"))
parser.add_argument("--time",
"-T",
default=12,
type=int,
help="Total navigation time in seconds")
parser.add_argument("--compute_covariances",
default=False,
action='store_true')
parser.add_argument("--verbose", default=False, action='store_true')
args = parser.parse_args()
return args
def __init__(self, twist_scenario="sick_twist"):
class ImuFactorExample(PreintegrationExample):
"""Class to run example of the Imu Factor."""
def __init__(self, twist_scenario: str = "sick_twist"):
self.velocity = np.array([2, 0, 0]) self.velocity = np.array([2, 0, 0])
self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
@ -42,32 +64,92 @@ class ImuFactorExample(PreintegrationExample):
zero_twist=(np.zeros(3), np.zeros(3)), zero_twist=(np.zeros(3), np.zeros(3)),
forward_twist=(np.zeros(3), self.velocity), forward_twist=(np.zeros(3), self.velocity),
loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity), loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity),
sick_twist=(np.array([math.radians(30), -math.radians(30), 0]), sick_twist=(np.array([math.radians(30), -math.radians(30),
self.velocity) 0]), self.velocity))
)
accBias = np.array([-0.3, 0.1, 0.2]) accBias = np.array([-0.3, 0.1, 0.2])
gyroBias = np.array([0.1, 0.3, -0.1]) gyroBias = np.array([0.1, 0.3, -0.1])
bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) bias = gtsam.imuBias.ConstantBias(accBias, gyroBias)
params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
# Some arbitrary noise sigmas
gyro_sigma = 1e-3
accel_sigma = 1e-3
I_3x3 = np.eye(3)
params.setGyroscopeCovariance(gyro_sigma**2 * I_3x3)
params.setAccelerometerCovariance(accel_sigma**2 * I_3x3)
params.setIntegrationCovariance(1e-7**2 * I_3x3)
dt = 1e-2 dt = 1e-2
super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario],
bias, dt) bias, params, dt)
def addPrior(self, i, graph): def addPrior(self, i: int, graph: gtsam.NonlinearFactorGraph):
"""Add a prior on the navigation state at time `i`."""
state = self.scenario.navState(i) state = self.scenario.navState(i)
graph.push_back(gtsam.PriorFactorPose3( graph.push_back(
X(i), state.pose(), self.priorNoise)) gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
graph.push_back(gtsam.PriorFactorVector( graph.push_back(
V(i), state.velocity(), self.velNoise)) gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise))
def run(self, T=12, compute_covariances=False, verbose=True): def optimize(self, graph: gtsam.NonlinearFactorGraph,
initial: gtsam.Values):
"""Optimize using Levenberg-Marquardt optimization."""
params = gtsam.LevenbergMarquardtParams()
params.setVerbosityLM("SUMMARY")
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
return result
def plot(self,
values: gtsam.Values,
title: str = "Estimated Trajectory",
fignum: int = POSES_FIG + 1,
show: bool = False):
"""
Plot poses in values.
Args:
values: The values object with the poses to plot.
title: The title of the plot.
fignum: The matplotlib figure number.
POSES_FIG is a value from the PreintegrationExample which we simply increment to generate a new figure.
show: Flag indicating whether to display the figure.
"""
i = 0
while values.exists(X(i)):
pose_i = values.atPose3(X(i))
plot_pose3(fignum, pose_i, 1)
i += 1
plt.title(title)
gtsam.utils.plot.set_axes_equal(fignum)
print("Bias Values", values.atConstantBias(BIAS_KEY))
plt.ioff()
if show:
plt.show()
def run(self,
T: int = 12,
compute_covariances: bool = False,
verbose: bool = True):
"""
Main runner.
Args:
T: Total trajectory time.
compute_covariances: Flag indicating whether to compute marginal covariances.
verbose: Flag indicating if printing should be verbose.
"""
graph = gtsam.NonlinearFactorGraph() graph = gtsam.NonlinearFactorGraph()
# initialize data structure for pre-integrated IMU measurements # initialize data structure for pre-integrated IMU measurements
pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)
T = 12
num_poses = T # assumes 1 factor per second num_poses = T # assumes 1 factor per second
initial = gtsam.Values() initial = gtsam.Values()
initial.insert(BIAS_KEY, self.actualBias) initial.insert(BIAS_KEY, self.actualBias)
@ -91,14 +173,13 @@ class ImuFactorExample(PreintegrationExample):
if k % 10 == 0: if k % 10 == 0:
self.plotImu(t, measuredOmega, measuredAcc) self.plotImu(t, measuredOmega, measuredAcc)
if (k+1) % int(1 / self.dt) == 0: if (k + 1) % int(1 / self.dt) == 0:
# Plot every second # Plot every second
self.plotGroundTruthPose(t, scale=1) self.plotGroundTruthPose(t, scale=1)
plt.title("Ground Truth Trajectory") plt.title("Ground Truth Trajectory")
# create IMU factor every second # create IMU factor every second
factor = gtsam.ImuFactor(X(i), V(i), factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
X(i + 1), V(i + 1),
BIAS_KEY, pim) BIAS_KEY, pim)
graph.push_back(factor) graph.push_back(factor)
@ -108,34 +189,34 @@ class ImuFactorExample(PreintegrationExample):
pim.resetIntegration() pim.resetIntegration()
rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1) rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3) * 0.1)
translationNoise = gtsam.Point3(*np.random.randn(3)*1) translationNoise = gtsam.Point3(*np.random.randn(3) * 1)
poseNoise = gtsam.Pose3(rotationNoise, translationNoise) poseNoise = gtsam.Pose3(rotationNoise, translationNoise)
actual_state_i = self.scenario.navState(t + self.dt) actual_state_i = self.scenario.navState(t + self.dt)
print("Actual state at {0}:\n{1}".format( print("Actual state at {0}:\n{1}".format(
t+self.dt, actual_state_i)) t + self.dt, actual_state_i))
noisy_state_i = gtsam.NavState( noisy_state_i = gtsam.NavState(
actual_state_i.pose().compose(poseNoise), actual_state_i.pose().compose(poseNoise),
actual_state_i.velocity() + np.random.randn(3)*0.1) actual_state_i.velocity() + np.random.randn(3) * 0.1)
initial.insert(X(i+1), noisy_state_i.pose()) initial.insert(X(i + 1), noisy_state_i.pose())
initial.insert(V(i+1), noisy_state_i.velocity()) initial.insert(V(i + 1), noisy_state_i.velocity())
i += 1 i += 1
# add priors on end # add priors on end
self.addPrior(num_poses - 1, graph) self.addPrior(num_poses - 1, graph)
initial.print_("Initial values:") initial.print("Initial values:")
# optimize using Levenberg-Marquardt optimization result = self.optimize(graph, initial)
params = gtsam.LevenbergMarquardtParams()
params.setVerbosityLM("SUMMARY")
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
result.print_("Optimized values:") result.print("Optimized values:")
print("------------------")
print(graph.error(initial))
print(graph.error(result))
print("------------------")
if compute_covariances: if compute_covariances:
# Calculate and print marginal covariances # Calculate and print marginal covariances
@ -148,33 +229,12 @@ class ImuFactorExample(PreintegrationExample):
print("Covariance on vel {}:\n{}\n".format( print("Covariance on vel {}:\n{}\n".format(
i, marginals.marginalCovariance(V(i)))) i, marginals.marginalCovariance(V(i))))
# Plot resulting poses self.plot(result, show=True)
i = 0
while result.exists(X(i)):
pose_i = result.atPose3(X(i))
plot_pose3(POSES_FIG+1, pose_i, 1)
i += 1
plt.title("Estimated Trajectory")
gtsam.utils.plot.set_axes_equal(POSES_FIG+1)
print("Bias Values", result.atConstantBias(BIAS_KEY))
plt.ioff()
plt.show()
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser("ImuFactorExample.py") args = parse_args()
parser.add_argument("--twist_scenario",
default="sick_twist",
choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist"))
parser.add_argument("--time", "-T", default=12,
type=int, help="Total time in seconds")
parser.add_argument("--compute_covariances",
default=False, action='store_true')
parser.add_argument("--verbose", default=False, action='store_true')
args = parser.parse_args()
ImuFactorExample(args.twist_scenario).run( ImuFactorExample(args.twist_scenario).run(args.time,
args.time, args.compute_covariances, args.verbose) args.compute_covariances,
args.verbose)

View File

@ -13,57 +13,60 @@ Author: Frank Dellaert
from __future__ import print_function from __future__ import print_function
import numpy as np
import gtsam import gtsam
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot import gtsam.utils.plot as gtsam_plot
import matplotlib.pyplot as plt
import numpy as np
# Create noise models # Create noise models
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
# Add a prior on the first pose, setting it to the origin def main():
# A prior factor consists of a mean and a noise model (covariance matrix) """Main runner"""
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin # Create an empty nonlinear factor graph
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE)) graph = gtsam.NonlinearFactorGraph()
# Add odometry factors # Add a prior on the first pose, setting it to the origin
odometry = gtsam.Pose2(2.0, 0.0, 0.0) # A prior factor consists of a mean and a noise model (covariance matrix)
# For simplicity, we will use the same noise model for each odometry factor priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
# Create odometry (Between) factors between consecutive poses graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
print("\nFactor Graph:\n{}".format(graph))
# Create the data structure to hold the initialEstimate estimate to the solution # Add odometry factors
# For illustrative purposes, these have been deliberately set to incorrect values odometry = gtsam.Pose2(2.0, 0.0, 0.0)
initial = gtsam.Values() # For simplicity, we will use the same noise model for each odometry factor
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) # Create odometry (Between) factors between consecutive poses
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1)) graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
print("\nInitial Estimate:\n{}".format(initial)) print("\nFactor Graph:\n{}".format(graph))
# optimize using Levenberg-Marquardt optimization # Create the data structure to hold the initialEstimate estimate to the solution
params = gtsam.LevenbergMarquardtParams() # For illustrative purposes, these have been deliberately set to incorrect values
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) initial = gtsam.Values()
result = optimizer.optimize() initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
print("\nFinal Result:\n{}".format(result)) initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
print("\nInitial Estimate:\n{}".format(initial))
# 5. Calculate and print marginal covariances for all variables # optimize using Levenberg-Marquardt optimization
marginals = gtsam.Marginals(graph, result) params = gtsam.LevenbergMarquardtParams()
for i in range(1, 4): optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i))) result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
fig = plt.figure(0)
for i in range(1, 4):
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
plt.axis('equal')
plt.show()
# 5. Calculate and print marginal covariances for all variables
marginals = gtsam.Marginals(graph, result)
for i in range(1, 4):
print("X{} covariance:\n{}\n".format(i,
marginals.marginalCovariance(i)))
for i in range(1, 4):
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
marginals.marginalCovariance(i))
plt.axis('equal')
plt.show()
if __name__ == "__main__":
main()

View File

@ -13,69 +13,85 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
from __future__ import print_function from __future__ import print_function
import numpy as np
import gtsam import gtsam
from gtsam.symbol_shorthand import X, L import numpy as np
from gtsam.symbol_shorthand import L, X
# Create noise models # Create noise models
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2]))
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
# Create the keys corresponding to unknown variables in the factor graph def main():
X1 = X(1) """Main runner"""
X2 = X(2)
X3 = X(3)
L1 = L(4)
L2 = L(5)
# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model # Create an empty nonlinear factor graph
graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE)) graph = gtsam.NonlinearFactorGraph()
# Add odometry factors between X1,X2 and X2,X3, respectively # Create the keys corresponding to unknown variables in the factor graph
graph.add(gtsam.BetweenFactorPose2( X1 = X(1)
X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) X2 = X(2)
graph.add(gtsam.BetweenFactorPose2( X3 = X(3)
X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE)) L1 = L(4)
L2 = L(5)
# Add Range-Bearing measurements to two different landmarks L1 and L2 # Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
graph.add(gtsam.BearingRangeFactor2D( graph.add(
X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE)) gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
# Print graph # Add odometry factors between X1,X2 and X2,X3, respectively
print("Factor Graph:\n{}".format(graph)) graph.add(
gtsam.BetweenFactorPose2(X1, X2, gtsam.Pose2(2.0, 0.0, 0.0),
ODOMETRY_NOISE))
graph.add(
gtsam.BetweenFactorPose2(X2, X3, gtsam.Pose2(2.0, 0.0, 0.0),
ODOMETRY_NOISE))
# Create (deliberately inaccurate) initial estimate # Add Range-Bearing measurements to two different landmarks L1 and L2
initial_estimate = gtsam.Values() graph.add(
initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15)) gtsam.BearingRangeFactor2D(X1, L1, gtsam.Rot2.fromDegrees(45),
initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20)) np.sqrt(4.0 + 4.0), MEASUREMENT_NOISE))
initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10)) graph.add(
initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10)) gtsam.BearingRangeFactor2D(X2, L1, gtsam.Rot2.fromDegrees(90), 2.0,
initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80)) MEASUREMENT_NOISE))
graph.add(
gtsam.BearingRangeFactor2D(X3, L2, gtsam.Rot2.fromDegrees(90), 2.0,
MEASUREMENT_NOISE))
# Print # Print graph
print("Initial Estimate:\n{}".format(initial_estimate)) print("Factor Graph:\n{}".format(graph))
# Optimize using Levenberg-Marquardt optimization. The optimizer # Create (deliberately inaccurate) initial estimate
# accepts an optional set of configuration parameters, controlling initial_estimate = gtsam.Values()
# things like convergence criteria, the type of linear system solver initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
# to use, and the amount of information displayed during optimization. initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
# Here we will use the default set of parameters. See the initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
# documentation for the full set of parameters. initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
params = gtsam.LevenbergMarquardtParams() initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
# Calculate and print marginal covariances for all variables # Print
marginals = gtsam.Marginals(graph, result) print("Initial Estimate:\n{}".format(initial_estimate))
for (key, str) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]:
print("{} covariance:\n{}\n".format(str, marginals.marginalCovariance(key))) # Optimize using Levenberg-Marquardt optimization. The optimizer
# accepts an optional set of configuration parameters, controlling
# things like convergence criteria, the type of linear system solver
# to use, and the amount of information displayed during optimization.
# Here we will use the default set of parameters. See the
# documentation for the full set of parameters.
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate,
params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
# Calculate and print marginal covariances for all variables
marginals = gtsam.Marginals(graph, result)
for (key, s) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"),
(L2, "L2")]:
print("{} covariance:\n{}\n".format(s,
marginals.marginalCovariance(key)))
if __name__ == "__main__":
main()

View File

@ -0,0 +1,178 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Pose SLAM example using iSAM2 in the 2D plane.
Author: Jerred Chen, Yusuf Ali
Modeled after:
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
                       key: int):
    """Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
    # Report the latest iSAM2 estimates on stdout.
    print("*"*50 + f"\nInference after State {key+1}:\n")
    print(current_estimate)

    # Marginal covariances drive the uncertainty ellipses drawn below.
    marginals = gtsam.Marginals(graph, current_estimate)

    # Redraw the trajectory plot from scratch on figure 0.
    axes = plt.figure(0).gca()
    plt.cla()

    pose_idx = 1
    while current_estimate.exists(pose_idx):
        gtsam_plot.plot_pose2(0, current_estimate.atPose2(pose_idx), 0.5,
                              marginals.marginalCovariance(pose_idx))
        pose_idx += 1

    plt.axis('equal')
    axes.set_xlim(-1, 5)
    axes.set_ylim(-1, 3)
    plt.pause(1)
def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
                           key: int, xy_tol=0.6, theta_tol=17) -> int:
    """Simple brute force approach which iterates through previous states
    and checks for loop closure.

    Args:
        odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
        current_estimate: The current estimates computed by iSAM2.
        key: Key corresponding to the current state estimate of the robot.
        xy_tol: Optional argument for the x-y measurement tolerance, in meters.
        theta_tol: Optional argument for the theta measurement tolerance, in degrees.
    Returns:
        k: The key of the state which is helping add the loop closure constraint.
            If loop closure is not found, then None is returned.
    """
    if not current_estimate:
        return None

    # Predict the new pose by composing the body-frame odometry onto the
    # latest estimated pose.
    prev_est = current_estimate.atPose2(key+1)
    rotated = prev_est.rotation().matrix() @ odom[:2]
    predicted_xy = np.array([prev_est.x() + rotated[0],
                             prev_est.y() + rotated[1]])
    predicted_theta = prev_est.theta() + odom[2]
    theta_tol_rad = theta_tol*np.pi/180

    # Scan all previous states for one within tolerance of the prediction.
    for candidate in range(1, key+1):
        pose = current_estimate.atPose2(candidate)
        xy_close = (abs(np.array([pose.x(), pose.y()]) - predicted_xy)
                    <= xy_tol).all()
        if xy_close and abs(pose.theta() - predicted_theta) <= theta_tol_rad:
            return candidate
    return None
def Pose2SLAM_ISAM2_example():
    """Perform 2D SLAM given the ground truth changes in pose as well as
    simple loop closure detection.

    Simulates a robot driving a square loop with noisy odometry, feeds the
    measurements to iSAM2 one step at a time, and plots/prints the evolving
    estimate after each update.
    """
    plt.ion()

    # Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xy_sigma = 0.3

    # Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
    prior_theta_sigma = 5

    # Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xy_sigma = 0.2

    # Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
    odometry_theta_sigma = 5

    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
                                                             prior_xy_sigma,
                                                             prior_theta_sigma*np.pi/180]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
                                                                odometry_xy_sigma,
                                                                odometry_theta_sigma*np.pi/180]))

    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # Create the ground truth odometry measurements of the robot during the trajectory.
    true_odometry = [(2, 0, 0),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2)]

    # Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
    odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
                             for true_odom in true_odometry]

    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization.
    graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))

    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate

    for i in range(len(true_odometry)):

        # Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
        noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]

        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)

        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))

            # Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
            computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
                                                                                    noisy_odom_y,
                                                                                    noisy_odom_theta))
            initial_estimate.insert(i + 2, computed_estimate)

        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()

        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        initial_estimate.clear()

    # Print the final covariance matrix for each pose after completing inference on the trajectory.
    # (A redundant `i = 1` that was immediately overwritten by the loop below has been removed.)
    marginals = gtsam.Marginals(graph, current_estimate)
    for i in range(1, len(true_odometry)+1):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")

    plt.ioff()
    plt.show()
# Run the 2D pose-SLAM iSAM2 demo when executed as a script.
if __name__ == "__main__":
    Pose2SLAM_ISAM2_example()

View File

@ -15,82 +15,88 @@ from __future__ import print_function
import math import math
import numpy as np
import gtsam import gtsam
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot import gtsam.utils.plot as gtsam_plot
import matplotlib.pyplot as plt
def vector3(x, y, z): def main():
"""Create 3d double numpy array.""" """Main runner."""
return np.array([x, y, z], dtype=float) # Create noise models
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(gtsam.Point3(0.3, 0.3, 0.1))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(
gtsam.Point3(0.2, 0.2, 0.1))
# Create noise models # 1. Create a factor graph container and add factors to it
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) graph = gtsam.NonlinearFactorGraph()
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))
# 1. Create a factor graph container and add factors to it # 2a. Add a prior on the first pose, setting it to the origin
graph = gtsam.NonlinearFactorGraph() # A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix)
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
# 2a. Add a prior on the first pose, setting it to the origin # 2b. Add odometry factors
# A prior factor consists of a mean and a noise ODOMETRY_NOISE (covariance matrix) # Create odometry (Between) factors between consecutive poses
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE)) graph.add(
gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
graph.add(
gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, math.pi / 2),
ODOMETRY_NOISE))
graph.add(
gtsam.BetweenFactorPose2(3, 4, gtsam.Pose2(2, 0, math.pi / 2),
ODOMETRY_NOISE))
graph.add(
gtsam.BetweenFactorPose2(4, 5, gtsam.Pose2(2, 0, math.pi / 2),
ODOMETRY_NOISE))
# 2b. Add odometry factors # 2c. Add the loop closure constraint
# Create odometry (Between) factors between consecutive poses # This factor encodes the fact that we have returned to the same pose. In real
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE)) # systems, these constraints may be identified in many ways, such as appearance-based
graph.add(gtsam.BetweenFactorPose2( # techniques with camera images. We will use another Between Factor to enforce this constraint:
2, 3, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) graph.add(
graph.add(gtsam.BetweenFactorPose2( gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2, 0, math.pi / 2),
3, 4, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2( print("\nFactor Graph:\n{}".format(graph)) # print
4, 5, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
# 2c. Add the loop closure constraint # 3. Create the data structure to hold the initial_estimate estimate to the
# This factor encodes the fact that we have returned to the same pose. In real # solution. For illustrative purposes, these have been deliberately set to incorrect values
# systems, these constraints may be identified in many ways, such as appearance-based initial_estimate = gtsam.Values()
# techniques with camera images. We will use another Between Factor to enforce this constraint: initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
graph.add(gtsam.BetweenFactorPose2( initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
5, 2, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE)) initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
print("\nFactor Graph:\n{}".format(graph)) # print initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
print("\nInitial Estimate:\n{}".format(initial_estimate)) # print
# 3. Create the data structure to hold the initial_estimate estimate to the # 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
# solution. For illustrative purposes, these have been deliberately set to incorrect values # The optimizer accepts an optional set of configuration parameters,
initial_estimate = gtsam.Values() # controlling things like convergence criteria, the type of linear
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2)) # system solver to use, and the amount of information displayed during
initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2)) # optimization. We will set a few parameters as a demonstration.
initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2)) parameters = gtsam.GaussNewtonParams()
initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
print("\nInitial Estimate:\n{}".format(initial_estimate)) # print
# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer # Stop iterating once the change in error between steps is less than this value
# The optimizer accepts an optional set of configuration parameters, parameters.setRelativeErrorTol(1e-5)
# controlling things like convergence criteria, the type of linear # Do not perform more than N iteration steps
# system solver to use, and the amount of information displayed during parameters.setMaxIterations(100)
# optimization. We will set a few parameters as a demonstration. # Create the optimizer ...
parameters = gtsam.GaussNewtonParams() optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
# ... and optimize
result = optimizer.optimize()
print("Final Result:\n{}".format(result))
# Stop iterating once the change in error between steps is less than this value # 5. Calculate and print marginal covariances for all variables
parameters.setRelativeErrorTol(1e-5) marginals = gtsam.Marginals(graph, result)
# Do not perform more than N iteration steps for i in range(1, 6):
parameters.setMaxIterations(100) print("X{} covariance:\n{}\n".format(i,
# Create the optimizer ... marginals.marginalCovariance(i)))
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
# ... and optimize
result = optimizer.optimize()
print("Final Result:\n{}".format(result))
# 5. Calculate and print marginal covariances for all variables for i in range(1, 6):
marginals = gtsam.Marginals(graph, result) gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5,
for i in range(1, 6): marginals.marginalCovariance(i))
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
fig = plt.figure(0) plt.axis('equal')
for i in range(1, 6): plt.show()
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
plt.axis('equal')
plt.show() if __name__ == "__main__":
main()

View File

@ -12,77 +12,86 @@ and does the optimization. Output is written on a file, in g2o format
# pylint: disable=invalid-name, E1101 # pylint: disable=invalid-name, E1101
from __future__ import print_function from __future__ import print_function
import argparse import argparse
import math
import numpy as np
import matplotlib.pyplot as plt
import gtsam import gtsam
import matplotlib.pyplot as plt
from gtsam.utils import plot from gtsam.utils import plot
def vector3(x, y, z): def main():
"""Create 3d double numpy array.""" """Main runner."""
return np.array([x, y, z], dtype=float)
parser = argparse.ArgumentParser(
description="A 2D Pose SLAM example that reads input from g2o, "
"converts it to a factor graph and does the optimization. "
"Output is written on a file, in g2o format")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument(
'-o',
'--output',
help="the path to the output file with optimized graph")
parser.add_argument('-m',
'--maxiter',
type=int,
help="maximum number of iterations for optimizer")
parser.add_argument('-k',
'--kernel',
choices=['none', 'huber', 'tukey'],
default="none",
help="Type of kernel used")
parser.add_argument("-p",
"--plot",
action="store_true",
help="Flag to plot results")
args = parser.parse_args()
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
else args.input
maxIterations = 100 if args.maxiter is None else args.maxiter
is3D = False
graph, initial = gtsam.readG2o(g2oFile, is3D)
assert args.kernel == "none", "Supplied kernel type is not yet implemented"
# Add prior on the pose having index (key) = 0
priorModel = gtsam.noiseModel.Diagonal.Variances(gtsam.Point3(1e-6, 1e-6, 1e-8))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
# ... and optimize
result = optimizer.optimize()
print("Optimization complete")
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))
if args.output is None:
print("\nFactor Graph:\n{}".format(graph))
print("\nInitial Estimate:\n{}".format(initial))
print("Final Result:\n{}".format(result))
else:
outputFile = args.output
print("Writing results to file: ", outputFile)
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
gtsam.writeG2o(graphNoKernel, result, outputFile)
print("Done!")
if args.plot:
resultPoses = gtsam.utilities.extractPose2(result)
for i in range(resultPoses.shape[0]):
plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
plt.show()
parser = argparse.ArgumentParser( if __name__ == "__main__":
description="A 2D Pose SLAM example that reads input from g2o, " main()
"converts it to a factor graph and does the optimization. "
"Output is written on a file, in g2o format")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument('-o', '--output',
help="the path to the output file with optimized graph")
parser.add_argument('-m', '--maxiter', type=int,
help="maximum number of iterations for optimizer")
parser.add_argument('-k', '--kernel', choices=['none', 'huber', 'tukey'],
default="none", help="Type of kernel used")
parser.add_argument("-p", "--plot", action="store_true",
help="Flag to plot results")
args = parser.parse_args()
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
else args.input
maxIterations = 100 if args.maxiter is None else args.maxiter
is3D = False
graph, initial = gtsam.readG2o(g2oFile, is3D)
assert args.kernel == "none", "Supplied kernel type is not yet implemented"
# Add prior on the pose having index (key) = 0
priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
# ... and optimize
result = optimizer.optimize()
print("Optimization complete")
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))
if args.output is None:
print("\nFactor Graph:\n{}".format(graph))
print("\nInitial Estimate:\n{}".format(initial))
print("Final Result:\n{}".format(result))
else:
outputFile = args.output
print("Writing results to file: ", outputFile)
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
gtsam.writeG2o(graphNoKernel, result, outputFile)
print ("Done!")
if args.plot:
resultPoses = gtsam.utilities.extractPose2(result)
for i in range(resultPoses.shape[0]):
plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
plt.show()

View File

@ -0,0 +1,67 @@
"""
GTSAM Copyright 2010, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
A 2D Pose SLAM example that reads input from g2o, and solve the Pose2 problem
using LAGO (Linear Approximation for Graph Optimization).
Output is written to a file, in g2o format
Reference:
L. Carlone, R. Aragues, J. Castellanos, and B. Bona, A fast and accurate
approximation for planar pose graph optimization, IJRR, 2014.
L. Carlone, R. Aragues, J.A. Castellanos, and B. Bona, A linear approximation
for graph-based simultaneous localization and mapping, RSS, 2011.
Author: Luca Carlone (C++), John Lambert (Python)
"""
import argparse
from argparse import Namespace
import numpy as np
import gtsam
from gtsam import Point3, Pose2, PriorFactorPose2, Values
def run(args: Namespace) -> None:
    """Run LAGO on input data stored in g2o file.

    Args:
        args: CLI namespace with ``input`` (path to a g2o file, or None to
            use the bundled "noisyToyGraph.txt") and ``output`` (path to
            write the optimized estimate to, or None to print it instead).
    """
    g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None else args.input

    # readG2o returns both the factor graph and an initial guess; LAGO
    # computes its own initialization, so the initial values are ignored.
    graph, _ = gtsam.readG2o(g2oFile)

    # Add prior on the pose having index (key) = 0 to anchor the gauge freedom.
    priorModel = gtsam.noiseModel.Diagonal.Variances(Point3(1e-6, 1e-6, 1e-8))
    graph.add(PriorFactorPose2(0, Pose2(), priorModel))
    print(graph)

    print("Computing LAGO estimate")
    estimateLago: Values = gtsam.lago.initialize(graph)
    print("done!")

    if args.output is None:
        estimateLago.print("estimateLago")
    else:
        outputFile = args.output
        print("Writing results to file: ", outputFile)
        # Re-read the raw graph (without the added prior) so the written
        # g2o file mirrors the structure of the input file.
        graphNoKernel, _ = gtsam.readG2o(g2oFile)
        gtsam.writeG2o(graphNoKernel, estimateLago, outputFile)
    print("Done! ")
if __name__ == "__main__":
    # Command-line entry point: parse input/output paths and run LAGO.
    arg_parser = argparse.ArgumentParser(
        description="A 2D Pose SLAM example that reads input from g2o, "
        "converts it to a factor graph and does the optimization. "
        "Output is written on a file, in g2o format")
    arg_parser.add_argument("-i", "--input", help="input file g2o format")
    arg_parser.add_argument("-o", "--output", help="the path to the output file with optimized graph")
    run(arg_parser.parse_args())

View File

@ -0,0 +1,208 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Pose SLAM example using iSAM2 in 3D space.
Author: Jerred Chen
Modeled after:
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
                       key: int):
    """Print the iSAM2 estimates after processing state `key` and redraw
    every pose, with its covariance ellipsoid, in the 3D plot."""
    # Dump the textual form of the current solution.
    print("*"*50 + f"\nInference after State {key+1}:\n")
    print(current_estimate)

    # Marginal covariances are required to draw the uncertainty ellipsoids.
    marginals = gtsam.Marginals(graph, current_estimate)

    # Redraw the figure from scratch with the newest estimates.
    figure = plt.figure(0)
    ax = figure.gca(projection='3d')
    plt.cla()

    pose_key = 1
    while current_estimate.exists(pose_key):
        gtsam_plot.plot_pose3(0, current_estimate.atPose3(pose_key), 10,
                              marginals.marginalCovariance(pose_key))
        pose_key += 1

    ax.set_xlim3d(-30, 45)
    ax.set_ylim3d(-30, 45)
    ax.set_zlim3d(-30, 45)
    plt.pause(1)
def create_poses() -> List[gtsam.Pose3]:
    """Creates ground truth poses of the robot.

    Returns:
        A list of six gtsam.Pose3 ground-truth poses; the last pose is
        identical to the first, so the trajectory closes a loop.
    """
    # Each matrix below is a 4x4 homogeneous transform: a 3x3 rotation
    # block and a 3x1 translation column.
    P0 = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    P1 = np.array([[0, -1, 0, 15],
                   [1, 0, 0, 15],
                   [0, 0, 1, 20],
                   [0, 0, 0, 1]])
    P2 = np.array([[np.cos(np.pi/4), 0, np.sin(np.pi/4), 30],
                   [0, 1, 0, 30],
                   [-np.sin(np.pi/4), 0, np.cos(np.pi/4), 30],
                   [0, 0, 0, 1]])
    P3 = np.array([[0, 1, 0, 30],
                   [0, 0, -1, 0],
                   [-1, 0, 0, -15],
                   [0, 0, 0, 1]])
    P4 = np.array([[-1, 0, 0, 0],
                   [0, -1, 0, -10],
                   [0, 0, 1, -10],
                   [0, 0, 0, 1]])
    # Use an explicit copy: `P0[:]` only creates a numpy *view* of P0, so
    # an in-place edit of either array would silently mutate both.
    P5 = P0.copy()

    return [gtsam.Pose3(P0), gtsam.Pose3(P1), gtsam.Pose3(P2),
            gtsam.Pose3(P3), gtsam.Pose3(P4), gtsam.Pose3(P5)]
def determine_loop_closure(odom_tf: gtsam.Pose3, current_estimate: gtsam.Values,
                           key: int, xyz_tol=0.6, rot_tol=17) -> int:
    """Simple brute force approach which iterates through previous states
    and checks for loop closure.

    Args:
        odom_tf: The noisy odometry transformation measurement in the body frame.
        current_estimate: The current estimates computed by iSAM2.
        key: Key corresponding to the current state estimate of the robot.
        xyz_tol: Optional argument for the translational tolerance, in meters.
        rot_tol: Optional argument for the rotational tolerance, in degrees.
    Returns:
        k: The key of the state which is helping add the loop closure constraint.
            If loop closure is not found, then None is returned.
    """
    # Nothing to compare against before the first estimate exists.
    if not current_estimate:
        return None

    # Predict the new pose by composing the odometry onto the latest estimate.
    predicted = current_estimate.atPose3(key+1).compose(odom_tf)
    pred_mat = predicted.matrix()
    rot_thresh = rot_tol*np.pi/180

    # Brute-force scan of all earlier states for one close enough in both
    # rotation (element-wise on the 3x3 block) and translation.
    for candidate in range(1, key+1):
        cand_mat = current_estimate.atPose3(candidate).matrix()
        rot_close = (abs(cand_mat[:3,:3] - pred_mat[:3,:3]) <= rot_thresh).all()
        xyz_close = (abs(cand_mat[:3,3] - pred_mat[:3,3]) <= xyz_tol).all()
        if rot_close and xyz_close:
            return candidate
def Pose3_ISAM2_example():
    """Perform 3D SLAM given ground truth poses as well as simple
    loop closure detection.

    Builds the factor graph incrementally: a prior on the first pose, then
    one odometry (Between) factor per step, replaced by a loop-closure
    factor whenever determine_loop_closure finds a nearby earlier state.
    Plots the evolving estimate after every iSAM2 update."""
    plt.ion()
    # Declare the 3D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xyz_sigma = 0.3
    # Declare the 3D rotational standard deviations of the prior factor's Gaussian model, in degrees.
    prior_rpy_sigma = 5
    # Declare the 3D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xyz_sigma = 0.2
    # Declare the 3D rotational standard deviations of the odometry factor's Gaussian model, in degrees.
    odometry_rpy_sigma = 5
    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose3 SLAM problem.
    # Noise vectors are ordered (roll, pitch, yaw, x, y, z); angles converted to radians.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_rpy_sigma*np.pi/180,
                                                             prior_rpy_sigma*np.pi/180,
                                                             prior_rpy_sigma*np.pi/180,
                                                             prior_xyz_sigma,
                                                             prior_xyz_sigma,
                                                             prior_xyz_sigma]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_rpy_sigma*np.pi/180,
                                                                odometry_rpy_sigma*np.pi/180,
                                                                odometry_rpy_sigma*np.pi/180,
                                                                odometry_xyz_sigma,
                                                                odometry_xyz_sigma,
                                                                odometry_xyz_sigma]))
    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()
    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)
    # Create the ground truth poses of the robot trajectory.
    true_poses = create_poses()
    # Create the ground truth odometry transformations, xyz translations, and roll-pitch-yaw rotations
    # between each robot pose in the trajectory.
    odometry_tf = [true_poses[i-1].transformPoseTo(true_poses[i]) for i in range(1, len(true_poses))]
    odometry_xyz = [(odometry_tf[i].x(), odometry_tf[i].y(), odometry_tf[i].z()) for i in range(len(odometry_tf))]
    odometry_rpy = [odometry_tf[i].rotation().rpy() for i in range(len(odometry_tf))]
    # Corrupt xyz translations and roll-pitch-yaw rotations with gaussian noise to create noisy odometry measurements.
    # Each sample is a 6-vector (rpy, xyz) drawn with the odometry model's covariance.
    noisy_measurements = [np.random.multivariate_normal(np.hstack((odometry_rpy[i],odometry_xyz[i])), \
                                                        ODOMETRY_NOISE.covariance()) for i in range(len(odometry_tf))]
    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization. Pose keys are 1-indexed.
    graph.push_back(gtsam.PriorFactorPose3(1, true_poses[0], PRIOR_NOISE))
    initial_estimate.insert(1, true_poses[0].compose(gtsam.Pose3(
        gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))
    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate
    for i in range(len(odometry_tf)):
        # Obtain the noisy translation and rotation that is received by the robot and corrupted by gaussian noise.
        noisy_odometry = noisy_measurements[i]
        # Compute the noisy odometry transformation according to the xyz translation and roll-pitch-yaw rotation.
        noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy_odometry[:3]), noisy_odometry[3:6].reshape(-1,1))
        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(noisy_tf, current_estimate, i, xyz_tol=18, rot_tol=30)
        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose3(i + 1, loop, noisy_tf, ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose3(i + 1, i + 2, noisy_tf, ODOMETRY_NOISE))
        # Compute and insert the initialization estimate for the current pose using a noisy odometry measurement.
        noisy_estimate = current_estimate.atPose3(i + 1).compose(noisy_tf)
        initial_estimate.insert(i + 2, noisy_estimate)
        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()
        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        # Only the initial values are cleared between iterations; the graph keeps
        # accumulating factors so the final Marginals below sees the whole problem.
        initial_estimate.clear()
    # Print the final covariance matrix for each pose after completing inference.
    marginals = gtsam.Marginals(graph, current_estimate)
    i = 1
    while current_estimate.exists(i):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
        i += 1
    plt.ioff()
    plt.show()
if __name__ == '__main__':
    # Script entry point: run the incremental Pose3 SLAM demo end-to-end.
    Pose3_ISAM2_example()

View File

@ -8,13 +8,14 @@
# pylint: disable=invalid-name, E1101 # pylint: disable=invalid-name, E1101
from __future__ import print_function from __future__ import print_function
import argparse import argparse
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import gtsam import gtsam
import matplotlib.pyplot as plt
import numpy as np
from gtsam.utils import plot from gtsam.utils import plot
from mpl_toolkits.mplot3d import Axes3D
def vector6(x, y, z, a, b, c): def vector6(x, y, z, a, b, c):
@ -22,50 +23,62 @@ def vector6(x, y, z, a, b, c):
return np.array([x, y, z, a, b, c], dtype=float) return np.array([x, y, z, a, b, c], dtype=float)
parser = argparse.ArgumentParser( def main():
description="A 3D Pose SLAM example that reads input from g2o, and " """Main runner."""
"initializes Pose3")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument('-o', '--output',
help="the path to the output file with optimized graph")
parser.add_argument("-p", "--plot", action="store_true",
help="Flag to plot results")
args = parser.parse_args()
g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \ parser = argparse.ArgumentParser(
else args.input description="A 3D Pose SLAM example that reads input from g2o, and "
"initializes Pose3")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument(
'-o',
'--output',
help="the path to the output file with optimized graph")
parser.add_argument("-p",
"--plot",
action="store_true",
help="Flag to plot results")
args = parser.parse_args()
is3D = True g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \
graph, initial = gtsam.readG2o(g2oFile, is3D) else args.input
# Add Prior on the first key is3D = True
priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, graph, initial = gtsam.readG2o(g2oFile, is3D)
1e-4, 1e-4, 1e-4))
print("Adding prior to g2o file ") # Add Prior on the first key
firstKey = initial.keys()[0] priorModel = gtsam.noiseModel.Diagonal.Variances(
graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4))
params = gtsam.GaussNewtonParams() print("Adding prior to g2o file ")
params.setVerbosity("Termination") # this will show info about stopping conds firstKey = initial.keys()[0]
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params) graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
result = optimizer.optimize()
print("Optimization complete")
print("initial error = ", graph.error(initial)) params = gtsam.GaussNewtonParams()
print("final error = ", graph.error(result)) params.setVerbosity(
"Termination") # this will show info about stopping conds
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
result = optimizer.optimize()
print("Optimization complete")
if args.output is None: print("initial error = ", graph.error(initial))
print("Final Result:\n{}".format(result)) print("final error = ", graph.error(result))
else:
outputFile = args.output
print("Writing results to file: ", outputFile)
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
gtsam.writeG2o(graphNoKernel, result, outputFile)
print ("Done!")
if args.plot: if args.output is None:
resultPoses = gtsam.utilities.allPose3s(result) print("Final Result:\n{}".format(result))
for i in range(resultPoses.size()): else:
plot.plot_pose3(1, resultPoses.atPose3(i)) outputFile = args.output
plt.show() print("Writing results to file: ", outputFile)
graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
gtsam.writeG2o(graphNoKernel, result, outputFile)
print("Done!")
if args.plot:
resultPoses = gtsam.utilities.allPose3s(result)
for i in range(resultPoses.size()):
plot.plot_pose3(1, resultPoses.atPose3(i))
plt.show()
if __name__ == "__main__":
main()

View File

@ -13,23 +13,29 @@ Author: Luca Carlone, Frank Dellaert (python port)
from __future__ import print_function from __future__ import print_function
import gtsam
import numpy as np import numpy as np
import gtsam
# Read graph from file def main():
g2oFile = gtsam.findExampleDataFile("pose3example.txt") """Main runner."""
# Read graph from file
g2oFile = gtsam.findExampleDataFile("pose3example.txt")
is3D = True is3D = True
graph, initial = gtsam.readG2o(g2oFile, is3D) graph, initial = gtsam.readG2o(g2oFile, is3D)
# Add prior on the first key. TODO: assumes first key ios z # Add prior on the first key. TODO: assumes first key ios z
priorModel = gtsam.noiseModel.Diagonal.Variances( priorModel = gtsam.noiseModel.Diagonal.Variances(
np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
firstKey = initial.keys()[0] firstKey = initial.keys()[0]
graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel))
# Initializing Pose3 - chordal relaxation" # Initializing Pose3 - chordal relaxation
initialization = gtsam.InitializePose3.initialize(graph) initialization = gtsam.InitializePose3.initialize(graph)
print(initialization) print(initialization)
if __name__ == "__main__":
main()

Some files were not shown because too many files have changed in this diff Show More