Merge branch 'develop' into fix/566
commit 86ab7d323a
@@ -9,10 +9,10 @@ tar -zxf ${BOOST_FOLDER}.tar.gz
 
 # Bootstrap
 cd ${BOOST_FOLDER}/
-./bootstrap.sh
+./bootstrap.sh --with-libraries=serialization,filesystem,thread,system,atomic,date_time,timer,chrono,program_options,regex
 
 # Build and install
-sudo ./b2 install
+sudo ./b2 -j$(nproc) install
 
 # Rebuild ld cache
 sudo ldconfig
 
@@ -85,4 +85,4 @@ make -j2 install
 cd $GITHUB_WORKSPACE/build/python
 $PYTHON setup.py install --user --prefix=
 cd $GITHUB_WORKSPACE/python/gtsam/tests
-$PYTHON -m unittest discover
+$PYTHON -m unittest discover -v
 
@@ -68,6 +68,8 @@ function configure()
 -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
 -DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
 -DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
+-DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
+-DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
 -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
 -DBOOST_ROOT=$BOOST_ROOT \
 -DBoost_NO_SYSTEM_PATHS=ON \

@@ -92,7 +94,11 @@ function build ()
 
 configure
 
-make -j2
+if [ "$(uname)" == "Linux" ]; then
+  make -j$(nproc)
+elif [ "$(uname)" == "Darwin" ]; then
+  make -j$(sysctl -n hw.physicalcpu)
+fi
 
 finish
 }

@@ -105,8 +111,12 @@ function test ()
 
 configure
 
-# Actual build:
-make -j2 check
+# Actual testing
+if [ "$(uname)" == "Linux" ]; then
+  make -j$(nproc) check
+elif [ "$(uname)" == "Darwin" ]; then
+  make -j$(sysctl -n hw.physicalcpu) check
+fi
 
 finish
 }
 
@@ -47,8 +47,7 @@ jobs:
 - name: Checkout
 uses: actions/checkout@v2
 
-- name: Install (Linux)
-if: runner.os == 'Linux'
+- name: Install Dependencies
 run: |
 # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository.
 if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then

@@ -63,7 +62,7 @@ jobs:
 fi
 sudo apt-get -y update
 
-sudo apt-get install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
+sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
 
 if [ "${{ matrix.compiler }}" = "gcc" ]; then
 sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib

@@ -79,7 +78,5 @@ jobs:
 run: |
 bash .github/scripts/boost.sh
 
-- name: Build and Test (Linux)
-if: runner.os == 'Linux'
-run: |
-bash .github/scripts/unix.sh -t
+- name: Build and Test
+run: bash .github/scripts/unix.sh -t
 
@@ -34,8 +34,7 @@ jobs:
 - name: Checkout
 uses: actions/checkout@v2
 
-- name: Install (macOS)
-if: runner.os == 'macOS'
+- name: Install Dependencies
 run: |
 brew install cmake ninja
 brew install boost

@@ -48,7 +47,5 @@ jobs:
 echo "CC=clang" >> $GITHUB_ENV
 echo "CXX=clang++" >> $GITHUB_ENV
 fi
-- name: Build and Test (macOS)
-if: runner.os == 'macOS'
-run: |
-bash .github/scripts/unix.sh -t
+- name: Build and Test
+run: bash .github/scripts/unix.sh -t
 
@@ -81,7 +81,7 @@ jobs:
 fi
 sudo apt-get -y update
 
-sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
+sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
 
 if [ "${{ matrix.compiler }}" = "gcc" ]; then
 sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
 
@@ -55,6 +55,12 @@ jobs:
 version: "9"
 flag: cayley
 
+- name: ubuntu-system-libs
+  os: ubuntu-18.04
+  compiler: gcc
+  version: "9"
+  flag: system-libs
+
 steps:
 - name: Checkout
 uses: actions/checkout@v2

@@ -70,7 +76,7 @@ jobs:
 fi
 sudo apt-get -y update
 
-sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
+sudo apt-get -y install cmake build-essential pkg-config libpython-dev python-numpy libicu-dev
 
 if [ "${{ matrix.compiler }}" = "gcc" ]; then
 sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib

@@ -126,6 +132,12 @@ jobs:
 echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV
 echo "GTSAM Uses Cayley map for Rot3"
 
+- name: Use system versions of 3rd party libraries
+  if: matrix.flag == 'system'
+  run: |
+    echo "GTSAM_USE_SYSTEM_EIGEN=ON" >> $GITHUB_ENV
+    echo "GTSAM_USE_SYSTEM_METIS=ON" >> $GITHUB_ENV
+
 - name: Build & Test
 run: |
 bash .github/scripts/unix.sh -t
 
@@ -12,42 +12,46 @@ jobs:
 CTEST_PARALLEL_LEVEL: 2
 CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
 GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
+BOOST_VERSION: 1.72.0
+BOOST_EXE: boost_1_72_0-msvc-14.2
 
 strategy:
 fail-fast: false
 matrix:
 # Github Actions requires a single row to be added to the build matrix.
 # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
 name: [
-  #TODO This build keeps timing out, need to understand why.
+  #TODO This build fails, need to understand why.
   # windows-2016-cl,
   windows-2019-cl,
 ]
 
 build_type: [Debug, Release]
 build_unstable: [ON]
 include:
 
-  #TODO This build keeps timing out, need to understand why.
+  #TODO This build fails, need to understand why.
   # - name: windows-2016-cl
   # os: windows-2016
   # compiler: cl
   # platform: 32
 
   - name: windows-2019-cl
     os: windows-2019
     compiler: cl
     platform: 64
 
 steps:
-- name: Checkout
-  uses: actions/checkout@v2
-- name: Install (Windows)
-  if: runner.os == 'Windows'
+- name: Install Dependencies
  shell: powershell
  run: |
  Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
  scoop install cmake --global # So we don't get issues with CMP0074 policy
  scoop install ninja --global
 
  if ("${{ matrix.compiler }}".StartsWith("clang")) {
  scoop install llvm --global
  }
 
  if ("${{ matrix.compiler }}" -eq "gcc") {
  # Chocolatey GCC is broken on the windows-2019 image.
  # See: https://github.com/DaanDeMeyer/doctest/runs/231595515

@@ -55,27 +59,38 @@ jobs:
  scoop install gcc --global
  echo "CC=gcc" >> $GITHUB_ENV
  echo "CXX=g++" >> $GITHUB_ENV
 
  } elseif ("${{ matrix.compiler }}" -eq "clang") {
  echo "CC=clang" >> $GITHUB_ENV
  echo "CXX=clang++" >> $GITHUB_ENV
 
  } else {
-  echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV
-  echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV
+  echo "CC=${{ matrix.compiler }}" >> $env:GITHUB_ENV
+  echo "CXX=${{ matrix.compiler }}" >> $env:GITHUB_ENV
  }
 
  # Scoop modifies the PATH so we make the modified PATH global.
- echo "$env:PATH" >> $GITHUB_PATH
+ echo "$env:PATH" >> $env:GITHUB_PATH
 
-- name: Download and install Boost
-  uses: MarkusJx/install-boost@v1.0.1
-  id: install-boost
-  with:
-    boost_version: 1.72.0
-    toolset: msvc14.2
-
-- name: Build (Windows)
-  if: runner.os == 'Windows'
-  env:
-    BOOST_ROOT: ${{ steps.install-boost.outputs.BOOST_ROOT }}
+- name: Install Boost
+  shell: powershell
+  run: |
+    # Snippet from: https://github.com/actions/virtual-environments/issues/2667
+    $BOOST_PATH = "C:\hostedtoolcache\windows\Boost\$env:BOOST_VERSION\x86_64"
+
+    # Use the prebuilt binary for Windows
+    $Url = "https://sourceforge.net/projects/boost/files/boost-binaries/$env:BOOST_VERSION/$env:BOOST_EXE-${{matrix.platform}}.exe"
+    (New-Object System.Net.WebClient).DownloadFile($Url, "$env:TEMP\boost.exe")
+    Start-Process -Wait -FilePath "$env:TEMP\boost.exe" "/SILENT","/SP-","/SUPPRESSMSGBOXES","/DIR=$BOOST_PATH"
+
+    # Set the BOOST_ROOT variable
+    echo "BOOST_ROOT=$BOOST_PATH" >> $env:GITHUB_ENV
+
+- name: Checkout
+  uses: actions/checkout@v2
+
+- name: Build
  run: |
  cmake -E remove_directory build
 cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
 
@@ -38,11 +38,14 @@ if(${GTSAM_SOURCE_DIR} STREQUAL ${GTSAM_BINARY_DIR})
 message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
 endif()
 
+include(cmake/HandleGeneralOptions.cmake) # CMake build options
+
+# Libraries:
 include(cmake/HandleBoost.cmake) # Boost
 include(cmake/HandleCCache.cmake) # ccache
 include(cmake/HandleCPack.cmake) # CPack
 include(cmake/HandleEigen.cmake) # Eigen3
-include(cmake/HandleGeneralOptions.cmake) # CMake build options
+include(cmake/HandleMetis.cmake) # metis
 include(cmake/HandleMKL.cmake) # MKL
 include(cmake/HandleOpenMP.cmake) # OpenMP
 include(cmake/HandlePerfTools.cmake) # Google perftools
 
@@ -17,3 +17,5 @@ class GTSAM_EXPORT MyClass { ... };
 
 GTSAM_EXPORT myFunction();
 ```
+
+More details [here](Using-GTSAM-EXPORT.md).
 
INSTALL.md
@@ -13,7 +13,8 @@ $ make install
 ## Important Installation Notes
 
 1. GTSAM requires the following libraries to be installed on your system:
-    - BOOST version 1.58 or greater (install through Linux repositories or MacPorts)
+    - BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes).
+
     - Cmake version 3.0 or higher
     - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
 

@@ -66,11 +67,15 @@ execute commands as follows for an out-of-source build:
 This will build the library and unit tests, run all of the unit tests,
 and then install the library itself.
 
+## Boost Notes
+
+Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
+This is particularly seen when using `clang` as the C++ compiler.
+
+For this reason we require Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
+
 ## Known Issues
 
 - When using `GTSAM_BUILD_WITH_MARCH_NATIVE=ON`, you may encounter issues in running tests which we are still investigating:
   - Use of a version of GCC < 7.5 results in an "Indeterminant Linear System" error for `testSmartProjectionFactor`.
   - Use of Boost version < 1.67 with clang will give a segfault for multiple test cases.
-- MSVC 2013 is not yet supported because it cannot build the serialization module of Boost 1.55 (or earlier).
 
 # Windows Installation
 
README.md
@@ -40,7 +40,7 @@ $ make install
 
 Prerequisites:
 
-- [Boost](http://www.boost.org/users/download/) >= 1.58 (Ubuntu: `sudo apt-get install libboost-all-dev`)
+- [Boost](http://www.boost.org/users/download/) >= 1.65 (Ubuntu: `sudo apt-get install libboost-all-dev`)
 - [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`)
 - A modern compiler, i.e., at least gcc 4.7.3 on Linux.
 

@@ -55,9 +55,9 @@ Optional prerequisites - used automatically if findable by CMake:
 
 GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
 
-GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods.
+GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4` to use the deprecated methods.
 
-GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
+GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V41` for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
 
 
 ## Wrappers
 

@@ -68,16 +68,16 @@ We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md)
 
 GTSAM includes a state of the art IMU handling scheme based on
 
-- Todd Lupton and Salah Sukkarieh, "Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions", TRO, 28(1):61-76, 2012. [[link]](https://ieeexplore.ieee.org/document/6092505)
+- Todd Lupton and Salah Sukkarieh, _"Visual-Inertial-Aided Navigation for High-Dynamic Motion in Built Environments Without Initial Conditions"_, TRO, 28(1):61-76, 2012. [[link]](https://ieeexplore.ieee.org/document/6092505)
 
 Our implementation improves on this using integration on the manifold, as detailed in
 
-- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, "Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors", Int. Conf. on Robotics and Automation (ICRA), 2014. [[link]](https://ieeexplore.ieee.org/abstract/document/6907483)
+- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, _"Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors"_, Int. Conf. on Robotics and Automation (ICRA), 2014. [[link]](https://ieeexplore.ieee.org/abstract/document/6907483)
 - Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015. [[link]](http://www.roboticsproceedings.org/rss11/p06.pdf)
 
 If you are using the factor in academic work, please cite the publications above.
 
-In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in [this document](doc/ImuFactor.pdf), is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF.
+In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in [this document](doc/ImuFactor.pdf), is enabled by default. To switch to the RSS 2015 version, set the flag `GTSAM_TANGENT_PREINTEGRATION` to OFF.
 
 
 ## Additional Information
 
@@ -10,7 +10,7 @@ To create a DLL in windows, the `GTSAM_EXPORT` keyword has been created and need
 3. If you have defined a class using `GTSAM_EXPORT`, do not use `GTSAM_EXPORT` in any of its individual function declarations. (Note that you _can_ put `GTSAM_EXPORT` in the definition of individual functions within a class as long as you don't put `GTSAM_EXPORT` in the class definition.)
 
 ## When is GTSAM_EXPORT being used incorrectly
-Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in gtsam/base will often show up when compiling the check_base_program or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
+Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in `gtsam/base` will often show up when compiling the `check_base_program` or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
 
 ```
 Error LNK2019 unresolved external symbol "public: void __cdecl gtsam::SO3::print(class std::basic_string<char,struct std::char_traits<char>,class std::allocator<char> > const &)const " (?print@SO3@gtsam@@QEBAXAEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@@Z) referenced in function "public: static void __cdecl gtsam::Testable<class gtsam::SO3>::Print(class gtsam::SO3 const &,class std::basic_string<char,struct std::char_traits@D@std@@V?$allocator@D@2@@std@@@Z) check_geometry_program C:\AFIT\lib\gtsam\build\gtsam\geometry\tests\testSO3.obj
 
@@ -144,7 +144,8 @@ if(NOT TBB_FOUND)
 
 elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
 # OS X
-set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
+set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb"
+                           "/usr/local/opt/tbb")
 
 # TODO: Check to see which C++ library is being used by the compiler.
 if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)

@@ -181,7 +182,18 @@ if(NOT TBB_FOUND)
 ##################################
 
 if(TBB_INCLUDE_DIRS)
-file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file)
+set(_tbb_version_file_prior_to_tbb_2021_1 "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h")
+set(_tbb_version_file_after_tbb_2021_1 "${TBB_INCLUDE_DIRS}/oneapi/tbb/version.h")
+
+if (EXISTS "${_tbb_version_file_prior_to_tbb_2021_1}")
+  file(READ "${_tbb_version_file_prior_to_tbb_2021_1}" _tbb_version_file )
+elseif (EXISTS "${_tbb_version_file_after_tbb_2021_1}")
+  file(READ "${_tbb_version_file_after_tbb_2021_1}" _tbb_version_file )
+else()
+  message(FATAL_ERROR "Found TBB installation: ${TBB_INCLUDE_DIRS} "
+                      "missing version header.")
+endif()
+
 string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
 TBB_VERSION_MAJOR "${_tbb_version_file}")
 string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
 
@@ -22,7 +22,7 @@ endif()
 
 
 # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
-set(BOOST_FIND_MINIMUM_VERSION 1.58)
+set(BOOST_FIND_MINIMUM_VERSION 1.65)
 set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
 
 find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})

@@ -30,7 +30,7 @@ find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM
 # Required components
 if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
    NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
-message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.")
+message(FATAL_ERROR "Missing required Boost components >= v1.65, please install/upgrade Boost or configure your search paths.")
 endif()
 
 option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
 
@@ -25,4 +25,4 @@ set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION
 
 # Deb-package specific cpack
 set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
-set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
+set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.65)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
 
@@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE)
 option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
 option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
 endif()
 option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
 option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
 option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
 option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
 option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
 option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
 option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
 option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
 option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
 option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
 option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
 option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
 option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
 option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
+option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
 if(NOT MSVC AND NOT XCODE_VERSION)
 option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
 endif()
 
@@ -0,0 +1,44 @@
+###############################################################################
+# Metis library
+
+# For both system or bundle version, a cmake target "metis-gtsam-if" is defined (interface library)
+
+# Dont try to use metis if GTSAM_SUPPORT_NESTED_DISSECTION is disabled:
+if (NOT GTSAM_SUPPORT_NESTED_DISSECTION)
+  return()
+endif()
+
+option(GTSAM_USE_SYSTEM_METIS "Find and use system-installed libmetis. If 'off', use the one bundled with GTSAM" OFF)
+
+if(GTSAM_USE_SYSTEM_METIS)
+  # Debian package: libmetis-dev
+
+  find_path(METIS_INCLUDE_DIR metis.h REQUIRED)
+  find_library(METIS_LIBRARY metis REQUIRED)
+
+  if(METIS_INCLUDE_DIR AND METIS_LIBRARY)
+    mark_as_advanced(METIS_INCLUDE_DIR)
+    mark_as_advanced(METIS_LIBRARY)
+
+    add_library(metis-gtsam-if INTERFACE)
+    target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR})
+    target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
+  endif()
+else()
+  # Bundled version:
+  option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF)
+  add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)
+
+  target_include_directories(metis-gtsam BEFORE PUBLIC
+    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
+    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
+    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
+    $<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
+  )
+
+  add_library(metis-gtsam-if INTERFACE)
+  target_link_libraries(metis-gtsam-if INTERFACE metis-gtsam)
+endif()
+
+list(APPEND GTSAM_EXPORTED_TARGETS metis-gtsam-if)
+install(TARGETS metis-gtsam-if EXPORT GTSAM-exports ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
 
@@ -32,6 +32,7 @@ endif()
 print_build_options_for_target(gtsam)
 
 print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
+print_config("Use System Metis" "${GTSAM_USE_SYSTEM_METIS}")
 
 if(GTSAM_USE_TBB)
 print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
 
@@ -1,24 +1,32 @@
 ###############################################################################
-# Find TBB
-find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
-
-# Set up variables if we're using TBB
-if(TBB_FOUND AND GTSAM_WITH_TBB)
-set(GTSAM_USE_TBB 1) # This will go into config.h
-if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
-set(TBB_GREATER_EQUAL_2020 1)
+if (GTSAM_WITH_TBB)
+# Find TBB
+find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
+
+# Set up variables if we're using TBB
+if(TBB_FOUND)
+set(GTSAM_USE_TBB 1) # This will go into config.h
+
+if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
+message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
+endif()
+
+if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
+set(TBB_GREATER_EQUAL_2020 1)
 else()
 set(TBB_GREATER_EQUAL_2020 0)
 endif()
 # all definitions and link requisites will go via imported targets:
 # tbb & tbbmalloc
 list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
 else()
 set(GTSAM_USE_TBB 0) # This will go into config.h
 endif()
 
 ###############################################################################
 # Prohibit Timing build mode in combination with TBB
 if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
 message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
 endif()
 
@@ -22,18 +22,19 @@ if (GTSAM_BUILD_DOCS)
 
 # GTSAM core subfolders
 set(gtsam_doc_subdirs
 gtsam/base
+gtsam/basis
 gtsam/discrete
 gtsam/geometry
 gtsam/inference
 gtsam/linear
 gtsam/navigation
 gtsam/nonlinear
 gtsam/sam
 gtsam/sfm
 gtsam/slam
 gtsam/smart
 gtsam/symbolic
 gtsam
 )
 
@@ -1,6 +1,57 @@
 # Instructions
 
-Build all docker images, in order:
+# Images on Docker Hub
+
+There are 4 images available on https://hub.docker.com/orgs/borglab/repositories:
+
+- `borglab/ubuntu-boost-tbb`: 18.04 Linux (nicknamed `bionic`) base image, with Boost and TBB installed.
+- `borglab/ubuntu-gtsam`: GTSAM Release version installed in `/usr/local`.
+- `borglab/ubuntu-gtsam-python`: installed GTSAM with python wrapper.
+- `borglab/ubuntu-gtsam-python-vnc`: image with GTSAM+python wrapper that will run a VNC server to connect to.
+
+# Using the images
+
+## Just GTSAM
+
+To start the Docker image, execute
+```bash
+docker run -it borglab/ubuntu-gtsam:bionic
+```
+after which you will find yourself in a bash shell, in the directory `/usr/src/gtsam/build`.
+
+## GTSAM with Python wrapper
+
+To use GTSAM via the python wrapper, similarly execute
+```bash
+docker run -it borglab/ubuntu-gtsam-python:bionic
+```
+and then launch `python3`:
+```bash
+python3
+>>> import gtsam
+>>> gtsam.Pose2(1,2,3)
+(1, 2, 3)
+```
+
+## GTSAM with Python wrapper and VNC
+
+First, start the docker image, which will run a VNC server on port 5900:
+```bash
+docker run -p 5900:5900 borglab/ubuntu-gtsam-python-vnc:bionic
+```
+
+Then open a remote VNC X client, for example:
+
+### Linux
+```bash
+sudo apt-get install tigervnc-viewer
+xtigervncviewer :5900
+```
+### Mac
+The Finder's "Connect to Server..." with `vnc://127.0.0.1` does not work, for some reason. Using the free [VNC Viewer](https://www.realvnc.com/en/connect/download/viewer/), enter `0.0.0.0:5900` as the server.
+
+# Re-building the images locally
+
+To build all docker images, in order:
 
 ```bash
 (cd ubuntu-boost-tbb && ./build.sh)

@@ -9,13 +60,4 @@ Build all docker images, in order:
 (cd ubuntu-gtsam-python-vnc && ./build.sh)
 ```
 
-Then launch with:
-
-    docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
-
-Then open a remote VNC X client, for example:
-
-    sudo apt-get install tigervnc-viewer
-    xtigervncviewer :5900
-
+Note: building GTSAM can take a lot of memory because of the heavy templating. It is advisable to give Docker enough resources, e.g., 8GB, to avoid OOM errors while compiling.
 
@@ -1,3 +1,3 @@
 # Build command for Docker image
 # TODO(dellaert): use docker compose and/or cmake
-docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
+docker build --no-cache -t borglab/ubuntu-boost-tbb:bionic .
 
@@ -1,7 +1,7 @@
 # This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
 
 # Get the base Ubuntu/GTSAM image from Docker Hub
-FROM dellaert/ubuntu-gtsam-python:bionic
+FROM borglab/ubuntu-gtsam-python:bionic
 
 # Things needed to get a python GUI
 ENV DEBIAN_FRONTEND noninteractive
 
@@ -1,4 +1,4 @@
 # Build command for Docker image
 # TODO(dellaert): use docker compose and/or cmake
 # Needs to be run in docker/ubuntu-gtsam-python-vnc directory
-docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
+docker build -t borglab/ubuntu-gtsam-python-vnc:bionic .
 
@@ -2,4 +2,4 @@
 docker run -it \
 --workdir="/usr/src/gtsam" \
 -p 5900:5900 \
-dellaert/ubuntu-gtsam-python-vnc:bionic
+borglab/ubuntu-gtsam-python-vnc:bionic
 
@@ -1,7 +1,7 @@
 # GTSAM Ubuntu image with Python wrapper support.
 
 # Get the base Ubuntu/GTSAM image from Docker Hub
-FROM dellaert/ubuntu-gtsam:bionic
+FROM borglab/ubuntu-gtsam:bionic
 
 # Install pip
 RUN apt-get install -y python3-pip python3-dev

@@ -22,7 +22,9 @@ RUN cmake \
 ..
 
 # Build again, as ubuntu-gtsam image cleaned
-RUN make -j4 install && make clean
+RUN make -j4 install
+RUN make python-install
+RUN make clean
 
 # Needed to run python wrapper:
 RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc
 
@@ -1,3 +1,3 @@
 # Build command for Docker image
 # TODO(dellaert): use docker compose and/or cmake
-docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
+docker build --no-cache -t borglab/ubuntu-gtsam-python:bionic .
 
@@ -1,7 +1,7 @@
 # Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
 
 # Get the base Ubuntu image from Docker Hub
-FROM dellaert/ubuntu-boost-tbb:bionic
+FROM borglab/ubuntu-boost-tbb:bionic
 
 # Install git
 RUN apt-get update && \
 
@@ -1,3 +1,3 @@
 # Build command for Docker image
 # TODO(dellaert): use docker compose and/or cmake
-docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
+docker build --no-cache -t borglab/ubuntu-gtsam:bionic .
 
@@ -51,13 +51,13 @@ The directory **vSLAMexample** includes 2 simple examples using GTSAM:
 
 See the separate README file there.
 
-##Undirected Graphical Models (UGM)
+## Undirected Graphical Models (UGM)
 The best representation for a Markov Random Field is a factor graph :-) This is illustrated with some discrete examples from the UGM MATLAB toolbox, which
 can be found at <http://www.di.ens.fr/~mschmidt/Software/UGM>
 
 
-##Building and Running
-To build, cd into the directory and do:
+## Building and Running
+To build, cd into the top-level gtsam directory and do:
 
 ```
 mkdir build
 
@@ -49,10 +49,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)
 
 endif()
 
-option(GTSAM_BUILD_METIS_EXECUTABLES "Build metis library executables" OFF)
-if(GTSAM_SUPPORT_NESTED_DISSECTION)
-  add_subdirectory(metis)
-endif()
+# metis: already handled in ROOT/cmake/HandleMetis.cmake
 
 add_subdirectory(ceres)
 
@@ -5,6 +5,7 @@ project(gtsam LANGUAGES CXX)
 # The following variable is the master list of subdirs to add
 set (gtsam_subdirs
 base
+basis
 geometry
 inference
 symbolic

@@ -88,7 +89,8 @@ list(APPEND gtsam_srcs "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/d
 install(FILES "${PROJECT_BINARY_DIR}/config.h" "${PROJECT_BINARY_DIR}/dllexport.h" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam)
 
 if(GTSAM_SUPPORT_NESTED_DISSECTION)
-list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam)
+# target metis-gtsam-if is defined in both cases: embedded metis or system version:
+list(APPEND GTSAM_ADDITIONAL_LIBRARIES metis-gtsam-if)
 endif()
 
 # Versions

@@ -154,16 +156,6 @@ target_include_directories(gtsam SYSTEM BEFORE PUBLIC
 $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
 $<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
 )
-if(GTSAM_SUPPORT_NESTED_DISSECTION)
-target_include_directories(gtsam BEFORE PUBLIC
-$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
-$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
-$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
-$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
-)
-endif()
-
-
 
 if(WIN32) # Add 'lib' prefix to static library to avoid filename collision with shared library
 if (NOT BUILD_SHARED_LIBS)
 
@@ -17,6 +17,7 @@
  * @author Frank Dellaert
  * @author Mike Bosse
  * @author Duy Nguyen Ta
+ * @author Yotam Stern
  */
 
 

@@ -319,12 +320,28 @@ T expm(const Vector& x, int K=7) {
 }
 
 /**
- * Linear interpolation between X and Y by coefficient t in [0, 1].
+ * Linear interpolation between X and Y by coefficient t. Typically t \in [0,1],
+ * but can also be used to extrapolate before pose X or after pose Y.
  */
 template <typename T>
-T interpolate(const T& X, const T& Y, double t) {
-  assert(t >= 0 && t <= 1);
-  return traits<T>::Compose(X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y))));
+T interpolate(const T& X, const T& Y, double t,
+              typename MakeOptionalJacobian<T, T>::type Hx = boost::none,
+              typename MakeOptionalJacobian<T, T>::type Hy = boost::none) {
+  if (Hx || Hy) {
+    typename MakeJacobian<T, T>::type between_H_x, log_H, exp_H, compose_H_x;
+    const T between =
+        traits<T>::Between(X, Y, between_H_x);  // between_H_y = identity
+    typename traits<T>::TangentVector delta = traits<T>::Logmap(between, log_H);
+    const T Delta = traits<T>::Expmap(t * delta, exp_H);
+    const T result = traits<T>::Compose(
+        X, Delta, compose_H_x);  // compose_H_xinv_y = identity
+
+    if (Hx) *Hx = compose_H_x + t * exp_H * log_H * between_H_x;
+    if (Hy) *Hy = t * exp_H * log_H;
+    return result;
+  }
+  return traits<T>::Compose(
+      X, traits<T>::Expmap(t * traits<T>::Logmap(traits<T>::Between(X, Y))));
 }
 
 /**
 
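The hunk above gives `interpolate()` optional Jacobians `Hx` and `Hy`. As an editor's note, here is a minimal usage sketch, not part of the merge: it assumes GTSAM built with this change, and the poses and interpolation fraction are arbitrary example values.

```cpp
// Sketch: calling the new interpolate() overload with Jacobians (assumed built against this branch).
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/Lie.h>

int main() {
  using gtsam::Pose3; using gtsam::Rot3; using gtsam::Point3;

  const Pose3 X(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));
  const Pose3 Y(Rot3::RzRyRx(0.4, 0.5, 0.6), Point3(4, 5, 6));

  // 6x6 Jacobians of the interpolated pose w.r.t. X and Y (Pose3 has 6 DOF).
  Eigen::Matrix<double, 6, 6> Hx, Hy;
  const Pose3 Z = gtsam::interpolate(X, Y, 0.25, Hx, Hy);

  Z.print("interpolated pose");  // Hx, Hy now hold the requested derivatives
  return 0;
}
```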
@@ -89,6 +89,13 @@ public:
     usurp(dynamic.data());
   }
 
+  /// Constructor that will resize a dynamic matrix (unless already correct)
+  OptionalJacobian(Eigen::MatrixXd* dynamic) :
+      map_(nullptr) {
+    dynamic->resize(Rows, Cols);  // no malloc if correct size
+    usurp(dynamic->data());
+  }
+
 #ifndef OPTIONALJACOBIAN_NOBOOST
 
   /// Constructor with boost::none just makes empty
 
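The new constructor above lets callers hand an `Eigen::MatrixXd*` to a fixed-size `OptionalJacobian`, which resizes the matrix in place. A minimal sketch of the calling pattern, not part of the diff; the toy function `f` is a hypothetical stand-in for any GTSAM function taking an optional Jacobian:

```cpp
// Sketch: passing a pointer to a dynamic matrix to an OptionalJacobian<2, 3> argument.
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Matrix.h>
#include <iostream>

// Hypothetical function that writes a 2x3 Jacobian when one is requested.
void f(gtsam::OptionalJacobian<2, 3> H = boost::none) {
  if (H) *H = gtsam::Matrix23::Ones();
}

int main() {
  Eigen::MatrixXd J;            // empty dynamic matrix
  f(&J);                        // resized to 2x3 by the new constructor, then filled
  std::cout << J << std::endl;  // prints a 2x3 matrix of ones
  return 0;
}
```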
@@ -33,13 +33,13 @@ class IndexPair {
   size_t j() const;
 };
 
-// template<KEY = {gtsam::IndexPair}>
-// class DSFMap {
-//   DSFMap();
-//   KEY find(const KEY& key) const;
-//   void merge(const KEY& x, const KEY& y);
-//   std::map<KEY, Set> sets();
-// };
+template<KEY = {gtsam::IndexPair}>
+class DSFMap {
+  DSFMap();
+  KEY find(const KEY& key) const;
+  void merge(const KEY& x, const KEY& y);
+  std::map<KEY, Set> sets();
+};
 
 class IndexPairSet {
   IndexPairSet();

@@ -81,13 +81,6 @@ class IndexPairSetMap {
   gtsam::IndexPairSet at(gtsam::IndexPair& key);
 };
 
-class DSFMapIndexPair {
-  DSFMapIndexPair();
-  gtsam::IndexPair find(const gtsam::IndexPair& key) const;
-  void merge(const gtsam::IndexPair& x, const gtsam::IndexPair& y);
-  gtsam::IndexPairSetMap sets();
-};
-
 #include <gtsam/base/Matrix.h>
 bool linear_independent(Matrix A, Matrix B, double tol);
 
@@ -24,40 +24,33 @@ using namespace std;
 using namespace gtsam;
 
 //******************************************************************************
+#define TEST_CONSTRUCTOR(DIM1, DIM2, X, TRUTHY) \
+  { \
+    OptionalJacobian<DIM1, DIM2> H(X); \
+    EXPECT(H == TRUTHY); \
+  }
 TEST( OptionalJacobian, Constructors ) {
   Matrix23 fixed;
 
-  OptionalJacobian<2, 3> H1;
-  EXPECT(!H1);
-
-  OptionalJacobian<2, 3> H2(fixed);
-  EXPECT(H2);
-
-  OptionalJacobian<2, 3> H3(&fixed);
-  EXPECT(H3);
-
   Matrix dynamic;
-  OptionalJacobian<2, 3> H4(dynamic);
-  EXPECT(H4);
-
-  OptionalJacobian<2, 3> H5(boost::none);
-  EXPECT(!H5);
-
   boost::optional<Matrix&> optional(dynamic);
-  OptionalJacobian<2, 3> H6(optional);
-  EXPECT(H6);
 
+  OptionalJacobian<2, 3> H;
+  EXPECT(!H);
+
+  TEST_CONSTRUCTOR(2, 3, fixed, true);
+  TEST_CONSTRUCTOR(2, 3, &fixed, true);
+  TEST_CONSTRUCTOR(2, 3, dynamic, true);
+  TEST_CONSTRUCTOR(2, 3, &dynamic, true);
+  TEST_CONSTRUCTOR(2, 3, boost::none, false);
+  TEST_CONSTRUCTOR(2, 3, optional, true);
 
   // Test dynamic
-  OptionalJacobian<-1, -1> H7;
-  EXPECT(!H7);
-
-  OptionalJacobian<-1, -1> H8(dynamic);
-  EXPECT(H8);
-
-  OptionalJacobian<-1, -1> H9(boost::none);
-  EXPECT(!H9);
-
-  OptionalJacobian<-1, -1> H10(optional);
-  EXPECT(H10);
+  TEST_CONSTRUCTOR(-1, -1, dynamic, true);
+  TEST_CONSTRUCTOR(-1, -1, boost::none, false);
+  TEST_CONSTRUCTOR(-1, -1, optional, true);
 }
 
 //******************************************************************************

@@ -101,6 +94,25 @@ TEST( OptionalJacobian, Fixed) {
   dynamic2.setOnes();
   test(dynamic2);
   EXPECT(assert_equal(kTestMatrix, dynamic2));
 
+  { // Dynamic pointer
+    // Passing in an empty matrix means we want it resized
+    Matrix dynamic0;
+    test(&dynamic0);
+    EXPECT(assert_equal(kTestMatrix, dynamic0));
+
+    // Dynamic wrong size
+    Matrix dynamic1(3, 5);
+    dynamic1.setOnes();
+    test(&dynamic1);
+    EXPECT(assert_equal(kTestMatrix, dynamic1));
+
+    // Dynamic right size
+    Matrix dynamic2(2, 5);
+    dynamic2.setOnes();
+    test(&dynamic2);
+    EXPECT(assert_equal(kTestMatrix, dynamic2));
+  }
 }
 
 //******************************************************************************
 
@ -0,0 +1,507 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Basis.h
|
||||
* @brief Compute an interpolating basis
|
||||
* @author Varun Agrawal, Jing Dong, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
/**
|
||||
* This file supports creating continuous functions `f(x;p)` as a linear
|
||||
* combination of `basis functions` such as the Fourier basis on SO(2) or a set
|
||||
* of Chebyshev polynomials on [-1,1].
|
||||
*
|
||||
* In the expression `f(x;p)` the variable `x` is
|
||||
* the continuous argument at which the function is evaluated, and `p` are
|
||||
* the parameters which are coefficients of the different basis functions,
|
||||
* e.g. p = [4; 3; 2] => 4 + 3x + 2x^2 for a polynomial.
|
||||
* However, different parameterizations are also possible.
|
||||
|
||||
* The `Basis` class below defines a number of functors that can be used to
|
||||
* evaluate `f(x;p)` at a given `x`, and these functors also calculate
|
||||
* the Jacobian of `f(x;p)` with respect to the parameters `p`.
|
||||
* This is actually the most important calculation, as it will allow GTSAM
|
||||
* to optimize over the parameters `p`.
|
||||
|
||||
* This functionality is implemented using the `CRTP` or "Curiously recurring
|
||||
* template pattern" C++ idiom, which is a meta-programming technique in which
|
||||
* the derived class is passed as a template argument to `Basis<DERIVED>`.
|
||||
* The DERIVED class is assumed to satisfy a C++ concept,
|
||||
* i.e., we expect it to define the following types and methods:
|
||||
|
||||
- type `Parameters`: the parameters `p` in f(x;p)
|
||||
- `CalculateWeights(size_t N, double x, double a=default, double b=default)`
|
||||
- `DerivativeWeights(size_t N, double x, double a=default, double b=default)`
|
||||
|
||||
where `Weights` is an N*1 row vector which defines the basis values for the
|
||||
polynomial at the specified point `x`.
|
||||
|
||||
E.g. A Fourier series would give the following:
|
||||
- `CalculateWeights` -> For N=5, the values for the bases:
|
||||
[1, cos(x), sin(x), cos(2x), sin(2x)]
|
||||
- `DerivativeWeights` -> For N=5, these are:
|
||||
[0, -sin(x), cos(x), -2sin(2x), 2cos(x)]
|
||||
|
||||
Note that for a pseudo-spectral basis (as in Chebyshev2), the weights are
|
||||
instead the values for the Barycentric interpolation formula, since the values
|
||||
at the polynomial points (e.g. Chebyshev points) define the bases.
|
||||
*/
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using Weights = Eigen::Matrix<double, 1, -1>; /* 1xN vector */
|
||||
|
||||
/**
|
||||
* @brief Function for computing the kronecker product of the 1*N Weight vector
|
||||
* `w` with the MxM identity matrix `I` efficiently.
|
||||
* The main reason for this is so we don't need to use Eigen's Unsupported
|
||||
* library.
|
||||
*
|
||||
* @tparam M Size of the identity matrix.
|
||||
* @param w The weights of the polynomial.
|
||||
* @return Mx(M*N) kronecker product [w(0)*I, w(1)*I, ..., w(N-1)*I]
|
||||
*/
|
||||
template <size_t M>
|
||||
Matrix kroneckerProductIdentity(const Weights& w) {
|
||||
Matrix result(M, w.cols() * M);
|
||||
result.setZero();
|
||||
|
||||
for (int i = 0; i < w.cols(); i++) {
|
||||
result.block(0, i * M, M, M).diagonal().array() = w(i);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/// CRTP Base class for function bases
|
||||
template <typename DERIVED>
|
||||
class GTSAM_EXPORT Basis {
|
||||
public:
|
||||
/**
|
||||
* Calculate weights for all x in vector X.
|
||||
* Returns M*N matrix where M is the size of the vector X,
|
||||
* and N is the number of basis functions.
|
||||
*/
|
||||
static Matrix WeightMatrix(size_t N, const Vector& X) {
|
||||
Matrix W(X.size(), N);
|
||||
for (int i = 0; i < X.size(); i++)
|
||||
W.row(i) = DERIVED::CalculateWeights(N, X(i));
|
||||
return W;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Calculate weights for all x in vector X, with interval [a,b].
|
||||
*
|
||||
* @param N The number of basis functions.
|
||||
* @param X The vector for which to compute the weights.
|
||||
* @param a The lower bound for the interval range.
|
||||
* @param b The upper bound for the interval range.
|
||||
* @return Returns M*N matrix where M is the size of the vector X.
|
||||
*/
|
||||
static Matrix WeightMatrix(size_t N, const Vector& X, double a, double b) {
|
||||
Matrix W(X.size(), N);
|
||||
for (int i = 0; i < X.size(); i++)
|
||||
W.row(i) = DERIVED::CalculateWeights(N, X(i), a, b);
|
||||
return W;
|
||||
}
|
||||
|
||||
/**
|
||||
* An instance of an EvaluationFunctor calculates f(x;p) at a given `x`,
|
||||
* applied to Parameters `p`.
|
||||
* This functor is used to evaluate a parameterized function at a given scalar
|
||||
* value x. When given a specific N*1 vector of Parameters, returns the scalar
|
||||
* value of the function at x, possibly with Jacobians wrpt the parameters.
|
||||
*/
|
||||
class EvaluationFunctor {
|
||||
protected:
|
||||
Weights weights_;
|
||||
|
||||
public:
|
||||
/// For serialization
|
||||
EvaluationFunctor() {}
|
||||
|
||||
/// Constructor with interval [a,b]
|
||||
EvaluationFunctor(size_t N, double x)
|
||||
: weights_(DERIVED::CalculateWeights(N, x)) {}
|
||||
|
||||
/// Constructor with interval [a,b]
|
||||
EvaluationFunctor(size_t N, double x, double a, double b)
|
||||
: weights_(DERIVED::CalculateWeights(N, x, a, b)) {}
|
||||
|
||||
/// Regular 1D evaluation
|
||||
double apply(const typename DERIVED::Parameters& p,
|
||||
OptionalJacobian<-1, -1> H = boost::none) const {
|
||||
if (H) *H = weights_;
|
||||
return (weights_ * p)(0);
|
||||
}
|
||||
|
||||
/// c++ sugar
|
||||
double operator()(const typename DERIVED::Parameters& p,
|
||||
OptionalJacobian<-1, -1> H = boost::none) const {
|
||||
return apply(p, H); // might call apply in derived
|
||||
}
|
||||
|
||||
void print(const std::string& s = "") const {
|
||||
std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* VectorEvaluationFunctor at a given x, applied to ParameterMatrix<M>.
|
||||
* This functor is used to evaluate a parameterized function at a given scalar
|
||||
* value x. When given a specific M*N parameters, returns an M-vector the M
|
||||
* corresponding functions at x, possibly with Jacobians wrpt the parameters.
|
||||
*/
|
||||
template <int M>
|
||||
class VectorEvaluationFunctor : protected EvaluationFunctor {
|
||||
protected:
|
||||
using VectorM = Eigen::Matrix<double, M, 1>;
|
||||
using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
|
||||
Jacobian H_;
|
||||
|
||||
/**
|
||||
* Calculate the `M*(M*N)` Jacobian of this functor with respect to
|
||||
* the M*N parameter matrix `P`.
|
||||
* We flatten assuming column-major order, e.g., if N=3 and M=2, we have
|
||||
* H =[ w(0) 0 w(1) 0 w(2) 0
|
||||
* 0 w(0) 0 w(1) 0 w(2) ]
|
||||
* i.e., the Kronecker product of weights_ with the MxM identity matrix.
|
||||
*/
|
||||
void calculateJacobian() {
|
||||
H_ = kroneckerProductIdentity<M>(this->weights_);
|
||||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
/// For serialization
|
||||
VectorEvaluationFunctor() {}
|
||||
|
||||
/// Default Constructor
|
||||
VectorEvaluationFunctor(size_t N, double x) : EvaluationFunctor(N, x) {
|
||||
calculateJacobian();
|
||||
}
|
||||
|
||||
/// Constructor, with interval [a,b]
|
||||
VectorEvaluationFunctor(size_t N, double x, double a, double b)
|
||||
: EvaluationFunctor(N, x, a, b) {
|
||||
calculateJacobian();
|
||||
}
|
||||
|
||||
/// M-dimensional evaluation
|
||||
VectorM apply(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxN*/ -1, -1> H = boost::none) const {
|
||||
if (H) *H = H_;
|
||||
return P.matrix() * this->weights_.transpose();
|
||||
}
|
||||
|
||||
/// c++ sugar
|
||||
VectorM operator()(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxN*/ -1, -1> H = boost::none) const {
|
||||
return apply(P, H);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Given a M*N Matrix of M-vectors at N polynomial points, an instance of
|
||||
* VectorComponentFunctor computes the N-vector value for a specific row
|
||||
* component of the M-vectors at all the polynomial points.
|
||||
*
|
||||
* This component is specified by the row index i, with 0<i<M.
|
||||
*/
|
||||
template <int M>
|
||||
class VectorComponentFunctor : public EvaluationFunctor {
|
||||
protected:
|
||||
using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
|
||||
size_t rowIndex_;
|
||||
Jacobian H_;
|
||||
|
||||
/*
|
||||
* Calculate the `1*(M*N)` Jacobian of this functor with respect to
|
||||
* the M*N parameter matrix `P`.
|
||||
* We flatten assuming column-major order, e.g., if N=3 and M=2, we have
|
||||
* H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
|
||||
* H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
|
||||
* i.e., one row of the Kronecker product of weights_ with the
|
||||
* MxM identity matrix. See also VectorEvaluationFunctor.
|
||||
*/
|
||||
void calculateJacobian(size_t N) {
|
||||
H_.setZero(1, M * N);
|
||||
for (int j = 0; j < EvaluationFunctor::weights_.size(); j++)
|
||||
H_(0, rowIndex_ + j * M) = EvaluationFunctor::weights_(j);
|
||||
}
|
||||
|
||||
public:
|
||||
/// For serialization
|
||||
VectorComponentFunctor() {}
|
||||
|
||||
/// Construct with row index
|
||||
VectorComponentFunctor(size_t N, size_t i, double x)
|
||||
: EvaluationFunctor(N, x), rowIndex_(i) {
|
||||
calculateJacobian(N);
|
||||
}
|
||||
|
||||
/// Construct with row index and interval
|
||||
VectorComponentFunctor(size_t N, size_t i, double x, double a, double b)
|
||||
: EvaluationFunctor(N, x, a, b), rowIndex_(i) {
|
||||
calculateJacobian(N);
|
||||
}
|
||||
|
||||
/// Calculate component of component rowIndex_ of P
|
||||
double apply(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
|
||||
if (H) *H = H_;
|
||||
return P.row(rowIndex_) * EvaluationFunctor::weights_.transpose();
|
||||
}
|
||||
|
||||
/// c++ sugar
|
||||
double operator()(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
|
||||
return apply(P, H);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Manifold EvaluationFunctor at a given x, applied to ParameterMatrix<M>.
|
||||
* This functor is used to evaluate a parameterized function at a given scalar
|
||||
* value x. When given a specific M*N parameters, returns an M-vector the M
|
||||
* corresponding functions at x, possibly with Jacobians wrpt the parameters.
|
||||
*
|
||||
* The difference with the VectorEvaluationFunctor is that after computing the
|
||||
* M*1 vector xi=F(x;P), with x a scalar and P the M*N parameter vector, we
|
||||
* also retract xi back to the T manifold.
|
||||
* For example, if T==Rot3, then we first compute a 3-vector xi using x and P,
|
||||
* and then map that 3-vector xi back to the Rot3 manifold, yielding a valid
|
||||
* 3D rotation.
|
||||
*/
|
||||
template <class T>
|
||||
class ManifoldEvaluationFunctor
|
||||
: public VectorEvaluationFunctor<traits<T>::dimension> {
|
||||
enum { M = traits<T>::dimension };
|
||||
using Base = VectorEvaluationFunctor<M>;
|
||||
|
||||
public:
|
||||
/// For serialization
|
||||
ManifoldEvaluationFunctor() {}
|
||||
|
||||
/// Default Constructor
|
||||
ManifoldEvaluationFunctor(size_t N, double x) : Base(N, x) {}
|
||||
|
||||
/// Constructor, with interval [a,b]
|
||||
ManifoldEvaluationFunctor(size_t N, double x, double a, double b)
|
||||
: Base(N, x, a, b) {}
|
||||
|
||||
/// Manifold evaluation
|
||||
T apply(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
|
||||
// Interpolate the M-dimensional vector to yield a vector in tangent space
|
||||
Eigen::Matrix<double, M, 1> xi = Base::operator()(P, H);
|
||||
|
||||
// Now call retract with this M-vector, possibly with derivatives
|
||||
Eigen::Matrix<double, M, M> D_result_xi;
|
||||
T result = T::ChartAtOrigin::Retract(xi, H ? &D_result_xi : 0);
|
||||
|
||||
// Finally, if derivatives are asked, apply chain rule where H is Mx(M*N)
|
||||
// derivative of interpolation and D_result_xi is MxM derivative of
|
||||
// retract.
|
||||
if (H) *H = D_result_xi * (*H);
|
||||
|
||||
// and return a T
|
||||
return result;
|
||||
}
|
||||
|
||||
/// c++ sugar
|
||||
T operator()(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
|
||||
return apply(P, H); // might call apply in derived
|
||||
}
|
||||
};
|
||||
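// Usage sketch for ManifoldEvaluationFunctor (illustrative only; assumes
// Chebyshev2 as the BASIS and gtsam::Rot3 as the manifold type T):
//
//   const size_t N = 4;
//   const double x = 0.3, a = 0.0, b = 1.0;
//   Chebyshev2::ManifoldEvaluationFunctor<Rot3> f(N, x, a, b);
//   ParameterMatrix<3> P(N);              // 3xN tangent-space samples
//   Matrix H;                             // receives the 3x(3*N) Jacobian
//   Rot3 R = f(P, H);                     // retract of the interpolated 3-vector
//
// H is the MxM retract Jacobian chained with the Mx(M*N) interpolation
// Jacobian, as computed in apply() above.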
|
||||
/// Base class for functors below that calculate derivative weights
|
||||
class DerivativeFunctorBase {
|
||||
protected:
|
||||
Weights weights_;
|
||||
|
||||
public:
|
||||
/// For serialization
|
||||
DerivativeFunctorBase() {}
|
||||
|
||||
DerivativeFunctorBase(size_t N, double x)
|
||||
: weights_(DERIVED::DerivativeWeights(N, x)) {}
|
||||
|
||||
DerivativeFunctorBase(size_t N, double x, double a, double b)
|
||||
: weights_(DERIVED::DerivativeWeights(N, x, a, b)) {}
|
||||
|
||||
void print(const std::string& s = "") const {
|
||||
std::cout << s << (s != "" ? " " : "") << weights_ << std::endl;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* An instance of a DerivativeFunctor calculates f'(x;p) at a given `x`,
|
||||
* applied to Parameters `p`.
|
||||
* When given a scalar value x and a specific N*1 vector of Parameters,
|
||||
* this functor returns the scalar derivative value of the function at x,
|
||||
* possibly with Jacobians wrpt the parameters.
|
||||
*/
|
||||
class DerivativeFunctor : protected DerivativeFunctorBase {
|
||||
public:
|
||||
/// For serialization
|
||||
DerivativeFunctor() {}
|
||||
|
||||
DerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {}
|
||||
|
||||
DerivativeFunctor(size_t N, double x, double a, double b)
|
||||
: DerivativeFunctorBase(N, x, a, b) {}
|
||||
|
||||
double apply(const typename DERIVED::Parameters& p,
|
||||
OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
|
||||
if (H) *H = this->weights_;
|
||||
return (this->weights_ * p)(0);
|
||||
}
|
||||
/// c++ sugar
|
||||
double operator()(const typename DERIVED::Parameters& p,
|
||||
OptionalJacobian</*1xN*/ -1, -1> H = boost::none) const {
|
||||
return apply(p, H); // might call apply in derived
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* VectorDerivativeFunctor at a given x, applied to ParameterMatrix<M>.
|
||||
*
|
||||
* This functor is used to evaluate the derivatives of a parameterized
|
||||
* function at a given scalar value x. When given specific M*N parameters,
|
||||
* returns an M-vector of the M corresponding function derivatives at x, possibly
|
||||
* with Jacobians wrpt the parameters.
|
||||
*/
|
||||
template <int M>
|
||||
class VectorDerivativeFunctor : protected DerivativeFunctorBase {
|
||||
protected:
|
||||
using VectorM = Eigen::Matrix<double, M, 1>;
|
||||
using Jacobian = Eigen::Matrix<double, /*MxMN*/ M, -1>;
|
||||
Jacobian H_;
|
||||
|
||||
/**
|
||||
* Calculate the `M*(M*N)` Jacobian of this functor with respect to
|
||||
* the M*N parameter matrix `P`.
|
||||
* We flatten assuming column-major order, e.g., if N=3 and M=2, we have
|
||||
* H =[ w(0) 0 w(1) 0 w(2) 0
|
||||
* 0 w(0) 0 w(1) 0 w(2) ]
|
||||
* i.e., the Kronecker product of weights_ with the MxM identity matrix.
|
||||
*/
|
||||
void calculateJacobian() {
|
||||
H_ = kroneckerProductIdentity<M>(this->weights_);
|
||||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
/// For serialization
|
||||
VectorDerivativeFunctor() {}
|
||||
|
||||
/// Default Constructor
|
||||
VectorDerivativeFunctor(size_t N, double x) : DerivativeFunctorBase(N, x) {
|
||||
calculateJacobian();
|
||||
}
|
||||
|
||||
/// Constructor, with optional interval [a,b]
|
||||
VectorDerivativeFunctor(size_t N, double x, double a, double b)
|
||||
: DerivativeFunctorBase(N, x, a, b) {
|
||||
calculateJacobian();
|
||||
}
|
||||
|
||||
VectorM apply(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
|
||||
if (H) *H = H_;
|
||||
return P.matrix() * this->weights_.transpose();
|
||||
}
|
||||
/// c++ sugar
|
||||
VectorM operator()(
|
||||
const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*MxMN*/ -1, -1> H = boost::none) const {
|
||||
return apply(P, H);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Given an M*N matrix of M-vectors at N polynomial points, an instance of
|
||||
* ComponentDerivativeFunctor computes the N-vector derivative for a specific
|
||||
* row component of the M-vectors at all the polynomial points.
|
||||
*
|
||||
* This component is specified by the row index i, with 0<=i<M.
|
||||
*/
|
||||
template <int M>
|
||||
class ComponentDerivativeFunctor : protected DerivativeFunctorBase {
|
||||
protected:
|
||||
using Jacobian = Eigen::Matrix<double, /*1xMN*/ 1, -1>;
|
||||
size_t rowIndex_;
|
||||
Jacobian H_;
|
||||
|
||||
/*
|
||||
* Calculate the `1*(M*N)` Jacobian of this functor with respect to
|
||||
* the M*N parameter matrix `P`.
|
||||
* We flatten assuming column-major order, e.g., if N=3 and M=2, we have
|
||||
* H=[w(0) 0 w(1) 0 w(2) 0] for rowIndex==0
|
||||
* H=[0 w(0) 0 w(1) 0 w(2)] for rowIndex==1
|
||||
* i.e., one row of the Kronecker product of weights_ with the
|
||||
* MxM identity matrix. See also VectorDerivativeFunctor.
|
||||
*/
|
||||
void calculateJacobian(size_t N) {
|
||||
H_.setZero(1, M * N);
|
||||
for (int j = 0; j < this->weights_.size(); j++)
|
||||
H_(0, rowIndex_ + j * M) = this->weights_(j);
|
||||
}
|
||||
|
||||
public:
|
||||
/// For serialization
|
||||
ComponentDerivativeFunctor() {}
|
||||
|
||||
/// Construct with row index
|
||||
ComponentDerivativeFunctor(size_t N, size_t i, double x)
|
||||
: DerivativeFunctorBase(N, x), rowIndex_(i) {
|
||||
calculateJacobian(N);
|
||||
}
|
||||
|
||||
/// Construct with row index and interval
|
||||
ComponentDerivativeFunctor(size_t N, size_t i, double x, double a, double b)
|
||||
: DerivativeFunctorBase(N, x, a, b), rowIndex_(i) {
|
||||
calculateJacobian(N);
|
||||
}
|
||||
/// Calculate derivative of component rowIndex_ of F
|
||||
double apply(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
|
||||
if (H) *H = H_;
|
||||
return P.row(rowIndex_) * this->weights_.transpose();
|
||||
}
|
||||
/// c++ sugar
|
||||
double operator()(const ParameterMatrix<M>& P,
|
||||
OptionalJacobian</*1xMN*/ -1, -1> H = boost::none) const {
|
||||
return apply(P, H);
|
||||
}
|
||||
};
|
||||
|
||||
// Vector version for MATLAB :-(
|
||||
static double Derivative(double x, const Vector& p, //
|
||||
OptionalJacobian</*1xN*/ -1, -1> H = boost::none) {
|
||||
return DerivativeFunctor(x)(p.transpose(), H);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,424 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file BasisFactors.h
|
||||
* @brief Factor definitions for various Basis functors.
|
||||
* @author Varun Agrawal
|
||||
* @date July 4, 2020
|
||||
**/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
#include <gtsam/nonlinear/FunctorizedFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @brief Factor for enforcing the scalar value of the polynomial BASIS
|
||||
* representation at `x` is the same as the measurement `z` when using a
|
||||
* pseudo-spectral parameterization.
|
||||
*
|
||||
* @tparam BASIS The basis class to use e.g. Chebyshev2
|
||||
*/
|
||||
template <class BASIS>
|
||||
class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, Vector>;
|
||||
|
||||
public:
|
||||
EvaluationFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new EvaluationFactor object
|
||||
*
|
||||
* @param key Symbol for value to optimize.
|
||||
* @param z The measurement value.
|
||||
* @param model Noise model
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the polynomial.
|
||||
*/
|
||||
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x)
|
||||
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new EvaluationFactor object
|
||||
*
|
||||
* @param key Symbol for value to optimize.
|
||||
* @param z The measurement value.
|
||||
* @param model Noise model
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x, double a, double b)
|
||||
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x, a, b)) {}
|
||||
|
||||
virtual ~EvaluationFactor() {}
|
||||
};
|
||||
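// Usage sketch (illustrative; the key, samples, and noise sigma are made up,
// and Chebyshev2 is just one possible BASIS). FitBasis, further below,
// automates exactly this pattern:
//
//   NonlinearFactorGraph graph;
//   auto model = noiseModel::Isotropic::Sigma(1, 0.1);
//   const size_t N = 3;
//   std::map<double, double> samples{{0.0, 1.0}, {0.5, 2.0}, {1.0, 3.0}};
//   for (const auto& s : samples)
//     graph.emplace_shared<EvaluationFactor<Chebyshev2>>(0, s.second, model,
//                                                        N, s.first);
//   Values initial;
//   initial.insert<Vector>(0, Vector::Zero(N));
//   Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();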
|
||||
/**
|
||||
* Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
|
||||
* of size (M, N) is equal to a vector-valued measurement at the same point,
|
||||
* when using a pseudo-spectral parameterization.
|
||||
*
|
||||
* This factor tries to enforce the basis function evaluation `f(x;p)` to take
|
||||
* on the value `z` at location `x`, providing a gradient on the parameters p.
|
||||
* In a probabilistic estimation context, `z` is known as a measurement, and the
|
||||
* parameterized basis function can be seen as a
|
||||
* measurement prediction function.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
* @param M: Size of the evaluated state vector.
|
||||
*/
|
||||
template <class BASIS, int M>
|
||||
class GTSAM_EXPORT VectorEvaluationFactor
|
||||
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
|
||||
|
||||
public:
|
||||
VectorEvaluationFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorEvaluationFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix object used to represent the
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
*/
|
||||
VectorEvaluationFactor(Key key, const Vector &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
double x)
|
||||
: Base(key, z, model,
|
||||
typename BASIS::template VectorEvaluationFunctor<M>(N, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorEvaluationFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix object used to represent the
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
VectorEvaluationFactor(Key key, const Vector &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
double x, double a, double b)
|
||||
: Base(key, z, model,
|
||||
typename BASIS::template VectorEvaluationFunctor<M>(N, x, a, b)) {}
|
||||
|
||||
virtual ~VectorEvaluationFactor() {}
|
||||
};
|
||||
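// Usage sketch (illustrative; key, measurement, and sigma are made up, with
// Chebyshev2 as the BASIS and M=3):
//
//   const size_t N = 4;
//   auto model = noiseModel::Isotropic::Sigma(3, 0.01);
//   Vector3 z(1.0, 2.0, 3.0);
//   VectorEvaluationFactor<Chebyshev2, 3> factor(1, z, model, N,
//                                                /*x=*/0.5, /*a=*/0.0, /*b=*/1.0);
//   Values values;
//   values.insert<ParameterMatrix<3>>(1, ParameterMatrix<3>(N));
//   double error = factor.error(values);  // error at the all-zero parameters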
|
||||
/**
|
||||
* Unary factor for enforcing BASIS polynomial evaluation on a ParameterMatrix
|
||||
* of size (P, N) is equal to a specified measurement at the same point, when
|
||||
* using a pseudo-spectral parameterization.
|
||||
*
|
||||
* This factor is similar to `VectorEvaluationFactor` with the key difference
|
||||
* being that it only enforces the constraint for a single scalar in the vector,
|
||||
* indexed by `i`.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
* @param P: Size of the fixed-size vector.
|
||||
*
|
||||
* Example:
|
||||
* VectorComponentFactor<BASIS, P> controlPrior(key, measured, model,
|
||||
* N, i, t, a, b);
|
||||
* where N is the degree and i is the component index.
|
||||
*/
|
||||
template <class BASIS, size_t P>
|
||||
class GTSAM_EXPORT VectorComponentFactor
|
||||
: public FunctorizedFactor<double, ParameterMatrix<P>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
|
||||
|
||||
public:
|
||||
VectorComponentFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorComponentFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix object used to represent the
|
||||
* polynomial.
|
||||
* @param z The scalar value at a specified index `i` of the full measurement
|
||||
* vector.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param i The index for the evaluated vector to give us the desired scalar
|
||||
* value.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
*/
|
||||
VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, size_t i, double x)
|
||||
: Base(key, z, model,
|
||||
typename BASIS::template VectorComponentFunctor<P>(N, i, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorComponentFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix object used to represent the
|
||||
* polynomial.
|
||||
* @param z The scalar value at a specified index `i` of the full measurement
|
||||
* vector.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param i The index for the evaluated vector to give us the desired scalar
|
||||
* value.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
VectorComponentFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, size_t i, double x, double a, double b)
|
||||
: Base(
|
||||
key, z, model,
|
||||
typename BASIS::template VectorComponentFunctor<P>(N, i, x, a, b)) {
|
||||
}
|
||||
|
||||
virtual ~VectorComponentFactor() {}
|
||||
};
|
||||
|
||||
/**
|
||||
* For a measurement value of type T i.e. `T z = g(x)`, this unary
|
||||
* factor enforces that the polynomial basis, when evaluated at `x`, gives us
|
||||
* the same `z`, i.e. `T z = g(x) = f(x;p)`.
|
||||
*
|
||||
* This is done via computations on the tangent space of the
|
||||
* manifold of T.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
* @param T: Object type which is synthesized by the provided functor.
|
||||
*
|
||||
* Example:
|
||||
* ManifoldEvaluationFactor<Chebyshev2, Rot3> rotationFactor(key, measurement,
|
||||
* model, N, x, a, b);
|
||||
*
|
||||
* where `x` is the value (e.g. timestep) at which the rotation was evaluated.
|
||||
*/
|
||||
template <class BASIS, typename T>
|
||||
class GTSAM_EXPORT ManifoldEvaluationFactor
|
||||
: public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
|
||||
|
||||
public:
|
||||
ManifoldEvaluationFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new ManifoldEvaluationFactor object.
|
||||
*
|
||||
* @param key Key for the state matrix parameterizing the function to estimate
|
||||
* via the BASIS.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which the estimated function is evaluated.
|
||||
*/
|
||||
ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x)
|
||||
: Base(key, z, model,
|
||||
typename BASIS::template ManifoldEvaluationFunctor<T>(N, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new ManifoldEvaluationFactor object.
|
||||
*
|
||||
* @param key Key for the state matrix parameterizing the function to estimate
|
||||
* via the BASIS.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which the estimated function is evaluated.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
ManifoldEvaluationFactor(Key key, const T &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x, double a, double b)
|
||||
: Base(
|
||||
key, z, model,
|
||||
typename BASIS::template ManifoldEvaluationFunctor<T>(N, x, a, b)) {
|
||||
}
|
||||
|
||||
virtual ~ManifoldEvaluationFactor() {}
|
||||
};
|
||||
|
||||
/**
|
||||
* A unary factor which enforces the evaluation of the derivative of a BASIS
|
||||
* polynomial at a specified point `x` is equal to the scalar measurement `z`.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
*/
|
||||
template <class BASIS>
|
||||
class GTSAM_EXPORT DerivativeFactor
|
||||
: public FunctorizedFactor<double, typename BASIS::Parameters> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, typename BASIS::Parameters>;
|
||||
|
||||
public:
|
||||
DerivativeFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new DerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the parameter vector which represents the basis
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
*/
|
||||
DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x)
|
||||
: Base(key, z, model, typename BASIS::DerivativeFunctor(N, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new DerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix which represents the basis
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
DerivativeFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
const size_t N, double x, double a, double b)
|
||||
: Base(key, z, model, typename BASIS::DerivativeFunctor(N, x, a, b)) {}
|
||||
|
||||
virtual ~DerivativeFactor() {}
|
||||
};
|
||||
|
||||
/**
|
||||
* A unary factor which enforces the evaluation of the derivative of a BASIS
|
||||
* polynomial at a specified point `x` is equal to the vector value `z`.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
* @param M: Size of the evaluated state vector derivative.
|
||||
*/
|
||||
template <class BASIS, int M>
|
||||
class GTSAM_EXPORT VectorDerivativeFactor
|
||||
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
|
||||
using Func = typename BASIS::template VectorDerivativeFunctor<M>;
|
||||
|
||||
public:
|
||||
VectorDerivativeFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorDerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix which represents the basis
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
*/
|
||||
VectorDerivativeFactor(Key key, const Vector &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
double x)
|
||||
: Base(key, z, model, Func(N, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new VectorDerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix which represents the basis
|
||||
* polynomial.
|
||||
* @param z The measurement value.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
VectorDerivativeFactor(Key key, const Vector &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
double x, double a, double b)
|
||||
: Base(key, z, model, Func(N, x, a, b)) {}
|
||||
|
||||
virtual ~VectorDerivativeFactor() {}
|
||||
};
|
||||
|
||||
/**
|
||||
* A unary factor which enforces the evaluation of the derivative of a BASIS
|
||||
* polynomial is equal to the scalar value at a specific index `i` of a
|
||||
* vector-valued measurement `z`.
|
||||
*
|
||||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
* @param P: Size of the control component derivative.
|
||||
*/
|
||||
template <class BASIS, int P>
|
||||
class GTSAM_EXPORT ComponentDerivativeFactor
|
||||
: public FunctorizedFactor<double, ParameterMatrix<P>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
|
||||
using Func = typename BASIS::template ComponentDerivativeFunctor<P>;
|
||||
|
||||
public:
|
||||
ComponentDerivativeFactor() {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new ComponentDerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix which represents the basis
|
||||
* polynomial.
|
||||
* @param z The scalar measurement value at a specific index `i` of the full
|
||||
* measurement vector.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param i The index for the evaluated vector to give us the desired scalar
|
||||
* value.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
*/
|
||||
ComponentDerivativeFactor(Key key, const double &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
size_t i, double x)
|
||||
: Base(key, z, model, Func(N, i, x)) {}
|
||||
|
||||
/**
|
||||
* @brief Construct a new ComponentDerivativeFactor object.
|
||||
*
|
||||
* @param key The key to the ParameterMatrix which represents the basis
|
||||
* polynomial.
|
||||
* @param z The scalar measurement value at a specific index `i` of the full
|
||||
* measurement vector.
|
||||
* @param model The noise model.
|
||||
* @param N The degree of the polynomial.
|
||||
* @param i The index for the evaluated vector to give us the desired scalar
|
||||
* value.
|
||||
* @param x The point at which to evaluate the basis polynomial.
|
||||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
ComponentDerivativeFactor(Key key, const double &z,
|
||||
const SharedNoiseModel &model, const size_t N,
|
||||
size_t i, double x, double a, double b)
|
||||
: Base(key, z, model, Func(N, i, x, a, b)) {}
|
||||
|
||||
virtual ~ComponentDerivativeFactor() {}
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,6 @@
|
|||
# Install headers
|
||||
file(GLOB basis_headers "*.h")
|
||||
install(FILES ${basis_headers} DESTINATION include/gtsam/basis)
|
||||
|
||||
# Build tests
|
||||
add_subdirectory(tests)
|
|
@ -0,0 +1,98 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Chebyshev.cpp
|
||||
* @brief Chebyshev basis decompositions
|
||||
* @author Varun Agrawal, Jing Dong, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#include <gtsam/basis/Chebyshev.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @brief Scale x from [a, b] to [t1, t2]
|
||||
*
|
||||
* ((b'-a') * (x - a) / (b - a)) + a'
|
||||
*
|
||||
* @param x Value to scale to new range.
|
||||
* @param a Original lower limit.
|
||||
* @param b Original upper limit.
|
||||
* @param t1 New lower limit.
|
||||
* @param t2 New upper limit.
|
||||
* @return double
|
||||
*/
|
||||
static double scale(double x, double a, double b, double t1, double t2) {
|
||||
return ((t2 - t1) * (x - a) / (b - a)) + t1;
|
||||
}
|
||||
|
||||
Weights Chebyshev1Basis::CalculateWeights(size_t N, double x, double a,
|
||||
double b) {
|
||||
Weights Tx(1, N);
|
||||
|
||||
x = scale(x, a, b, -1, 1);
|
||||
|
||||
Tx(0) = 1;
|
||||
Tx(1) = x;
|
||||
for (size_t i = 2; i < N; i++) {
|
||||
// instead of cos(i*acos(x)), this recurrence is much faster
|
||||
Tx(i) = 2 * x * Tx(i - 1) - Tx(i - 2);
|
||||
}
|
||||
return Tx;
|
||||
}
|
||||
|
||||
Weights Chebyshev1Basis::DerivativeWeights(size_t N, double x, double a,
|
||||
double b) {
|
||||
Weights Ux = Chebyshev2Basis::CalculateWeights(N, x, a, b);
|
||||
Weights weights = Weights::Zero(N);
|
||||
for (size_t n = 1; n < N; n++) {
|
||||
weights(n) = n * Ux(n - 1);
|
||||
}
|
||||
return weights;
|
||||
}
|
||||
|
||||
Weights Chebyshev2Basis::CalculateWeights(size_t N, double x, double a,
|
||||
double b) {
|
||||
Weights Ux(N);
|
||||
|
||||
x = scale(x, a, b, -1, 1);
|
||||
|
||||
Ux(0) = 1;
|
||||
Ux(1) = 2 * x;
|
||||
for (size_t i = 2; i < N; i++) {
|
||||
// instead of cos(i*acos(x)), this recurrence is much faster
|
||||
Ux(i) = 2 * x * Ux(i - 1) - Ux(i - 2);
|
||||
}
|
||||
return Ux;
|
||||
}
|
||||
|
||||
Weights Chebyshev2Basis::DerivativeWeights(size_t N, double x, double a,
|
||||
double b) {
|
||||
Weights Tx = Chebyshev1Basis::CalculateWeights(N + 1, x, a, b);
|
||||
Weights Ux = Chebyshev2Basis::CalculateWeights(N, x, a, b);
|
||||
|
||||
Weights weights(N);
|
||||
|
||||
x = scale(x, a, b, -1, 1);
|
||||
if (x == -1 || x == 1) {
|
||||
throw std::runtime_error(
|
||||
"Derivative of Chebyshev2 Basis does not exist at range limits.");
|
||||
}
|
||||
|
||||
for (size_t n = 0; n < N; n++) {
|
||||
weights(n) = ((n + 1) * Tx(n + 1) - x * Ux(n)) / (x * x - 1);
|
||||
}
|
||||
return weights;
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,109 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Chebyshev.h
|
||||
* @brief Chebyshev basis decompositions
|
||||
* @author Varun Agrawal, Jing Dong, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
#include <unsupported/Eigen/KroneckerProduct>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Basis of Chebyshev polynomials of the first kind
|
||||
* https://en.wikipedia.org/wiki/Chebyshev_polynomials#First_kind
|
||||
* These are typically denoted with the symbol T_n, where n is the degree.
|
||||
* The parameter N is the number of coefficients, i.e., N = n+1.
|
||||
*/
|
||||
struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
|
||||
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
|
||||
|
||||
Parameters parameters_;
|
||||
|
||||
/**
|
||||
* @brief Evaluate Chebyshev Weights on [-1,1] at x up to order N-1 (N values)
|
||||
*
|
||||
* @param N Degree of the polynomial.
|
||||
* @param x Point to evaluate polynomial at.
|
||||
* @param a Lower limit of polynomial (default=-1).
|
||||
* @param b Upper limit of polynomial (default=1).
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
|
||||
/**
|
||||
* @brief Evaluate Chebyshev derivative at x.
|
||||
* The derivative weights are pre-multiplied to the polynomial Parameters.
|
||||
*
|
||||
* From Wikipedia we have D[T_n(x),x] = n*U_{n-1}(x)
|
||||
* I.e. the derivative of a first kind cheb is just a second kind cheb
|
||||
* So, we define a second kind basis here of order N-1
|
||||
* Note that it has one less weight.
|
||||
*
|
||||
* The Parameters pertain to 1st kind chebs up to order N-1
|
||||
* But of course the first one (order 0) is constant, so omit that weight.
|
||||
*
|
||||
* @param N Degree of the polynomial.
|
||||
* @param x Point to evaluate polynomial at.
|
||||
* @param a Lower limit of polynomial (default=-1).
|
||||
* @param b Upper limit of polynomial (default=1).
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
}; // Chebyshev1Basis
|
||||
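// Example (sketch): evaluating a first-kind Chebyshev series and its
// derivative from a coefficient vector (coefficients are illustrative):
//
//   const size_t N = 4;                   // coefficients for T_0 .. T_3
//   Vector p = (Vector(4) << 1.0, 0.5, -0.25, 0.1).finished();
//   const double x = 0.3;
//   double fx = (Chebyshev1Basis::CalculateWeights(N, x) * p)(0);
//   double dfx = (Chebyshev1Basis::DerivativeWeights(N, x) * p)(0);
//
// DerivativeWeights uses D[T_n(x)] = n * U_{n-1}(x), as documented above.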
|
||||
/**
|
||||
* Basis of Chebyshev polynomials of the second kind.
|
||||
* https://en.wikipedia.org/wiki/Chebyshev_polynomials#Second_kind
|
||||
* These are typically denoted with the symbol U_n, where n is the degree.
|
||||
* The parameter N is the number of coefficients, i.e., N = n+1.
|
||||
* In contrast to the templates in Chebyshev2, the classes below specify
|
||||
* basis functions, weighted combinations of which are used to approximate
|
||||
* functions. In this sense, they are like the sines and cosines of the Fourier
|
||||
* basis.
|
||||
*/
|
||||
struct Chebyshev2Basis : Basis<Chebyshev2Basis> {
|
||||
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
|
||||
|
||||
/**
|
||||
* Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values).
|
||||
*
|
||||
* @param N Degree of the polynomial.
|
||||
* @param x Point to evaluate polynomial at.
|
||||
* @param a Lower limit of polynomial (default=-1).
|
||||
* @param b Upper limit of polynomial (default=1).
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
|
||||
/**
|
||||
* @brief Evaluate Chebyshev derivative at x.
|
||||
*
|
||||
* @param N Degree of the polynomial.
|
||||
* @param x Point to evaluate polynomial at.
|
||||
* @param a Lower limit of polynomial (default=-1).
|
||||
* @param b Upper limit of polynomial (default=1).
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
}; // Chebyshev2Basis
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,205 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Chebyshev2.cpp
|
||||
* @brief Chebyshev parameterizations on Chebyshev points of second kind
|
||||
* @author Varun Agrawal, Jing Dong, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
Weights Chebyshev2::CalculateWeights(size_t N, double x, double a, double b) {
|
||||
// Allocate space for weights
|
||||
Weights weights(N);
|
||||
|
||||
// We start by getting distances from x to all Chebyshev points
|
||||
// as well as getting smallest distance
|
||||
Weights distances(N);
|
||||
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
const double dj =
|
||||
x - Point(N, j, a, b); // only thing that depends on [a,b]
|
||||
|
||||
if (std::abs(dj) < 1e-10) {
|
||||
// exceptional case: x coincides with a Chebyshev point
|
||||
weights.setZero();
|
||||
weights(j) = 1;
|
||||
return weights;
|
||||
}
|
||||
distances(j) = dj;
|
||||
}
|
||||
|
||||
// Beginning of interval, j = 0, x(0) = a
|
||||
weights(0) = 0.5 / distances(0);
|
||||
|
||||
// All intermediate points j=1:N-2
|
||||
double d = weights(0), s = -1; // changes sign s at every iteration
|
||||
for (size_t j = 1; j < N - 1; j++, s = -s) {
|
||||
weights(j) = s / distances(j);
|
||||
d += weights(j);
|
||||
}
|
||||
|
||||
// End of interval, j = N-1, x(N-1) = b
|
||||
weights(N - 1) = 0.5 * s / distances(N - 1);
|
||||
d += weights(N - 1);
|
||||
|
||||
// normalize
|
||||
return weights / d;
|
||||
}
|
||||
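// Example (sketch): barycentric interpolation with the weights above; the
// test function exp(x) and N are illustrative.
//
//   const size_t N = 16;
//   Vector fvals(N);
//   for (size_t j = 0; j < N; j++) fvals(j) = std::exp(Chebyshev2::Point(N, j));
//   Weights w = Chebyshev2::CalculateWeights(N, 0.3);
//   double approx = (w * fvals)(0);       // close to exp(0.3)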
|
||||
Weights Chebyshev2::DerivativeWeights(size_t N, double x, double a, double b) {
|
||||
// Allocate space for weights
|
||||
Weights weightDerivatives(N);
|
||||
|
||||
// toggle variable so we don't need to use `pow` for -1
|
||||
double t = -1;
|
||||
|
||||
// We start by getting distances from x to all Chebyshev points
|
||||
// as well as getting smallest distance
|
||||
Weights distances(N);
|
||||
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
const double dj =
|
||||
x - Point(N, j, a, b); // only thing that depends on [a,b]
|
||||
if (std::abs(dj) < 1e-10) {
|
||||
// exceptional case: x coincides with a Chebyshev point
|
||||
weightDerivatives.setZero();
|
||||
// compute the jth row of the differentiation matrix for this point
|
||||
double cj = (j == 0 || j == N - 1) ? 2. : 1.;
|
||||
for (size_t k = 0; k < N; k++) {
|
||||
if (j == 0 && k == 0) {
|
||||
// we reverse the sign since we order the cheb points from -1 to 1
|
||||
weightDerivatives(k) = -(cj * (N - 1) * (N - 1) + 1) / 6.0;
|
||||
} else if (j == N - 1 && k == N - 1) {
|
||||
// we reverse the sign since we order the cheb points from -1 to 1
|
||||
weightDerivatives(k) = (cj * (N - 1) * (N - 1) + 1) / 6.0;
|
||||
} else if (k == j) {
|
||||
double xj = Point(N, j);
|
||||
double xj2 = xj * xj;
|
||||
weightDerivatives(k) = -0.5 * xj / (1 - xj2);
|
||||
} else {
|
||||
double xj = Point(N, j);
|
||||
double xk = Point(N, k);
|
||||
double ck = (k == 0 || k == N - 1) ? 2. : 1.;
|
||||
t = ((j + k) % 2) == 0 ? 1 : -1;
|
||||
weightDerivatives(k) = (cj / ck) * t / (xj - xk);
|
||||
}
|
||||
}
|
||||
return 2 * weightDerivatives / (b - a);
|
||||
}
|
||||
distances(j) = dj;
|
||||
}
|
||||
|
||||
// This section of code computes the derivative of
|
||||
// the Barycentric Interpolation weights formula by applying
|
||||
// the chain rule on the original formula.
|
||||
|
||||
// g and k are multiplier terms which represent the derivatives of
|
||||
// the numerator and denominator
|
||||
double g = 0, k = 0;
|
||||
double w = 1;
|
||||
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
if (j == 0 || j == N - 1) {
|
||||
w = 0.5;
|
||||
} else {
|
||||
w = 1.0;
|
||||
}
|
||||
|
||||
t = (j % 2 == 0) ? 1 : -1;
|
||||
|
||||
double c = t / distances(j);
|
||||
g += w * c;
|
||||
k += (w * c / distances(j));
|
||||
}
|
||||
|
||||
double s = 1; // changes sign s at every iteration
|
||||
double g2 = g * g;
|
||||
|
||||
for (size_t j = 0; j < N; j++, s = -s) {
|
||||
// Beginning of interval, j = 0, x0 = -1.0 and end of interval, j = N-1,
|
||||
// x0 = 1.0
|
||||
if (j == 0 || j == N - 1) {
|
||||
w = 0.5;
|
||||
} else {
|
||||
// All intermediate points j=1:N-2
|
||||
w = 1.0;
|
||||
}
|
||||
weightDerivatives(j) = (w * -s / (g * distances(j) * distances(j))) -
|
||||
(w * -s * k / (g2 * distances(j)));
|
||||
}
|
||||
|
||||
return weightDerivatives;
|
||||
}
|
||||
|
||||
Chebyshev2::DiffMatrix Chebyshev2::DifferentiationMatrix(size_t N, double a,
|
||||
double b) {
|
||||
DiffMatrix D(N, N);
|
||||
if (N == 1) {
|
||||
D(0, 0) = 1;
|
||||
return D;
|
||||
}
|
||||
|
||||
// toggle variable so we don't need to use `pow` for -1
|
||||
double t = -1;
|
||||
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
double xi = Point(N, i);
|
||||
double ci = (i == 0 || i == N - 1) ? 2. : 1.;
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
if (i == 0 && j == 0) {
|
||||
// we reverse the sign since we order the cheb points from -1 to 1
|
||||
D(i, j) = -(ci * (N - 1) * (N - 1) + 1) / 6.0;
|
||||
} else if (i == N - 1 && j == N - 1) {
|
||||
// we reverse the sign since we order the cheb points from -1 to 1
|
||||
D(i, j) = (ci * (N - 1) * (N - 1) + 1) / 6.0;
|
||||
} else if (i == j) {
|
||||
double xi2 = xi * xi;
|
||||
D(i, j) = -xi / (2 * (1 - xi2));
|
||||
} else {
|
||||
double xj = Point(N, j);
|
||||
double cj = (j == 0 || j == N - 1) ? 2. : 1.;
|
||||
t = ((i + j) % 2) == 0 ? 1 : -1;
|
||||
D(i, j) = (ci / cj) * t / (xi - xj);
|
||||
}
|
||||
}
|
||||
}
|
||||
// scale the matrix to the range
|
||||
return D / ((b - a) / 2.0);
|
||||
}
|
||||
|
||||
Weights Chebyshev2::IntegrationWeights(size_t N, double a, double b) {
|
||||
// Allocate space for weights
|
||||
Weights weights(N);
|
||||
size_t K = N - 1, // number of intervals between N points
|
||||
K2 = K * K;
|
||||
weights(0) = 0.5 * (b - a) / (K2 + K % 2 - 1);
|
||||
weights(N - 1) = weights(0);
|
||||
|
||||
size_t last_k = K / 2 + K % 2 - 1;
|
||||
|
||||
for (size_t i = 1; i <= N - 2; ++i) {
|
||||
double theta = i * M_PI / K;
|
||||
weights(i) = (K % 2 == 0) ? 1 - cos(K * theta) / (K2 - 1) : 1;
|
||||
|
||||
for (size_t k = 1; k <= last_k; ++k)
|
||||
weights(i) -= 2 * cos(2 * k * theta) / (4 * k * k - 1);
|
||||
weights(i) *= (b - a) / K;
|
||||
}
|
||||
|
||||
return weights;
|
||||
}
|
||||
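// Example (sketch): Clenshaw-Curtis quadrature with the weights above,
// integrating x^2 over [0, 1] (exact value 1/3); N is illustrative.
//
//   const size_t N = 8;
//   const double a = 0.0, b = 1.0;
//   Weights w = Chebyshev2::IntegrationWeights(N, a, b);
//   Vector fvals(N);
//   for (size_t j = 0; j < N; j++) {
//     const double xj = Chebyshev2::Point(N, j, a, b);
//     fvals(j) = xj * xj;
//   }
//   double integral = (w * fvals)(0);     // close to 1.0 / 3.0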
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,148 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Chebyshev2.h
|
||||
* @brief Pseudo-spectral parameterization for Chebyshev polynomials of the
|
||||
* second kind.
|
||||
*
|
||||
* In a pseudo-spectral case, rather than the parameters acting as
|
||||
* weights for the bases polynomials (as in Chebyshev2Basis), here the
|
||||
* parameters are the *values* at a specific set of points in the interval, the
|
||||
* "Chebyshev points". These values uniquely determine the polynomial that
|
||||
* interpolates them at the Chebyshev points.
|
||||
*
|
||||
* This is different from Chebyshev.h since it leverages ideas from
|
||||
* pseudo-spectral optimization, i.e. we don't decompose into basis functions,
|
||||
* rather estimate function parameters that enforce function nodes at Chebyshev
|
||||
* points.
|
||||
*
|
||||
* Please refer to Agrawal21icra for more details.
|
||||
*
|
||||
* @author Varun Agrawal, Jing Dong, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
#include <boost/function.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Chebyshev Interpolation on Chebyshev points of the second kind
|
||||
* Note that N here, the number of points, is one less than N from
|
||||
* 'Approximation Theory and Approximation Practice by L. N. Trefethen (pg.42)'.
|
||||
*/
|
||||
class GTSAM_EXPORT Chebyshev2 : public Basis<Chebyshev2> {
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
using Base = Basis<Chebyshev2>;
|
||||
using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
|
||||
using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;
|
||||
|
||||
/// Specific Chebyshev point
|
||||
static double Point(size_t N, int j) {
|
||||
assert(j >= 0 && size_t(j) < N);
|
||||
const double dtheta = M_PI / (N > 1 ? (N - 1) : 1);
|
||||
// We add -PI so that we get values ordered from -1 to +1
|
||||
// sin(- M_PI_2 + dtheta*j); also works
|
||||
return cos(-M_PI + dtheta * j);
|
||||
}
|
||||
|
||||
/// Specific Chebyshev point, within [a,b] interval
|
||||
static double Point(size_t N, int j, double a, double b) {
|
||||
assert(j >= 0 && size_t(j) < N);
|
||||
const double dtheta = M_PI / (N - 1);
|
||||
// We add -PI so that we get values ordered from -1 to +1
|
||||
return a + (b - a) * (1. + cos(-M_PI + dtheta * j)) / 2;
|
||||
}
|
||||
|
||||
/// All Chebyshev points
|
||||
static Vector Points(size_t N) {
|
||||
Vector points(N);
|
||||
for (size_t j = 0; j < N; j++) points(j) = Point(N, j);
|
||||
return points;
|
||||
}
|
||||
|
||||
/// All Chebyshev points, within [a,b] interval
|
||||
static Vector Points(size_t N, double a, double b) {
|
||||
Vector points = Points(N);
|
||||
const double T1 = (a + b) / 2, T2 = (b - a) / 2;
|
||||
points = T1 + (T2 * points).array();
|
||||
return points;
|
||||
}
|
||||
|
||||
/**
|
||||
* Evaluate Chebyshev Weights on [-1,1] at any x up to order N-1 (N values)
|
||||
* These weights implement barycentric interpolation at a specific x.
|
||||
* More precisely, f(x) ~ [w0;...;wN] * [f0;...;fN], where the fj are the
|
||||
* values of the function f at the Chebyshev points. As such, for a given x we
|
||||
* obtain a linear map from parameter vectors f to interpolated values f(x).
|
||||
* Optional [a,b] interval can be specified as well.
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
|
||||
/**
|
||||
* Evaluate derivative of barycentric weights.
|
||||
* This is easy and efficient via the DifferentiationMatrix.
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x, double a = -1,
|
||||
double b = 1);
|
||||
|
||||
/// compute D = differentiation matrix, Trefethen00book p.53
|
||||
/// when given a parameter vector f of function values at the Chebyshev
|
||||
/// points, D*f are the values of f'.
|
||||
/// https://people.maths.ox.ac.uk/trefethen/8all.pdf Theorem 8.4
|
||||
static DiffMatrix DifferentiationMatrix(size_t N, double a = -1,
|
||||
double b = 1);
|
||||
|
||||
/**
|
||||
* Evaluate Clenshaw-Curtis integration weights.
|
||||
* Trefethen00book, pg 128, clencurt.m
|
||||
* Note that N in clencurt.m is 1 less than our N
|
||||
* K = N-1;
|
||||
theta = pi*(0:K)'/K;
|
||||
w = zeros(1,N); ii = 2:K; v = ones(K-1, 1);
|
||||
if mod(K,2) == 0
|
||||
w(1) = 1/(K^2-1); w(N) = w(1);
|
||||
for k=1:K/2-1, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
|
||||
v = v - cos(K*theta(ii))/(K^2-1);
|
||||
else
|
||||
w(1) = 1/K^2; w(N) = w(1);
|
||||
for k=1:K/2, v = v-2*cos(2*k*theta(ii))/(4*k^2-1); end
|
||||
end
|
||||
w(ii) = 2*v/K;
|
||||
|
||||
*/
|
||||
static Weights IntegrationWeights(size_t N, double a = -1, double b = 1);
|
||||
|
||||
/**
|
||||
* Create matrix of values at Chebyshev points given vector-valued function.
|
||||
*/
|
||||
template <size_t M>
|
||||
static Matrix matrix(boost::function<Eigen::Matrix<double, M, 1>(double)> f,
|
||||
size_t N, double a = -1, double b = 1) {
|
||||
Matrix Xmat(M, N);
|
||||
for (size_t j = 0; j < N; j++) {
|
||||
Xmat.col(j) = f(Point(N, j, a, b));
|
||||
}
|
||||
return Xmat;
|
||||
}
|
||||
}; // \ Chebyshev2
|
||||
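// Example (sketch): differentiating sampled values with the differentiation
// matrix; the function sin(x) and N are illustrative.
//
//   const size_t N = 12;
//   const double a = 0.0, b = M_PI;
//   Vector f(N);
//   for (size_t j = 0; j < N; j++) f(j) = std::sin(Chebyshev2::Point(N, j, a, b));
//   Matrix D = Chebyshev2::DifferentiationMatrix(N, a, b);
//   Vector df = D * f;                    // approximates cos at the N points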
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,99 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file FitBasis.h
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @brief Fit a Basis using least-squares
|
||||
*/
|
||||
|
||||
/*
|
||||
* Concept needed for LS. Parameters = Coefficients | Values
|
||||
* - Parameters, Jacobian
|
||||
* - PredictFactor(double x)(Parameters p, OptionalJacobian<1,N> H)
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Our sequence representation is a map of {x: y} values where y = f(x)
|
||||
using Sequence = std::map<double, double>;
|
||||
/// A sample is a key-value pair from a sequence.
|
||||
using Sample = std::pair<double, double>;
|
||||
|
||||
/**
|
||||
* Class that does regression via least squares
|
||||
* Example usage:
|
||||
* size_t N = 3;
|
||||
* auto fit = FitBasis<Chebyshev2>(data_points, noise_model, N);
|
||||
* Vector coefficients = fit.parameters();
|
||||
*
|
||||
* where `data_points` are a map from `x` to `y` values indicating a function
|
||||
* mapping at specific points, `noise_model` is the gaussian noise model, and
|
||||
* `N` is the degree of the polynomial basis used to fit the function.
|
||||
*/
|
||||
template <class Basis>
|
||||
class FitBasis {
|
||||
public:
|
||||
using Parameters = typename Basis::Parameters;
|
||||
|
||||
private:
|
||||
Parameters parameters_;
|
||||
|
||||
public:
|
||||
/// Create nonlinear FG from Sequence
|
||||
static NonlinearFactorGraph NonlinearGraph(const Sequence& sequence,
|
||||
const SharedNoiseModel& model,
|
||||
size_t N) {
|
||||
NonlinearFactorGraph graph;
|
||||
for (const Sample sample : sequence) {
|
||||
graph.emplace_shared<EvaluationFactor<Basis>>(0, sample.second, model, N,
|
||||
sample.first);
|
||||
}
|
||||
return graph;
|
||||
}
|
||||
|
||||
/// Create linear FG from Sequence
|
||||
static GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
|
||||
NonlinearFactorGraph graph = NonlinearGraph(sequence, model, N);
|
||||
Values values;
|
||||
values.insert<Parameters>(0, Parameters::Zero(N));
|
||||
GaussianFactorGraph::shared_ptr gfg = graph.linearize(values);
|
||||
return gfg;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Construct a new FitBasis object.
|
||||
*
|
||||
* @param sequence map of x->y values for a function, a.k.a. y = f(x).
|
||||
* @param model The noise model to use.
|
||||
* @param N The degree of the polynomial to fit.
|
||||
*/
|
||||
FitBasis(const Sequence& sequence, const SharedNoiseModel& model, size_t N) {
|
||||
GaussianFactorGraph::shared_ptr gfg = LinearGraph(sequence, model, N);
|
||||
VectorValues solution = gfg->optimize();
|
||||
parameters_ = solution.at(0);
|
||||
}
|
||||
|
||||
/// Return the fitted parameters (basis coefficients or values)
|
||||
Parameters parameters() const { return parameters_; }
|
||||
};
|
||||
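// Example (sketch): fitting noisy scalar samples; the sample values and the
// noise sigma are illustrative (see also the class comment above).
//
//   Sequence sequence;                    // map from x to y = f(x)
//   sequence[0.0] = 1.0;
//   sequence[0.5] = 1.6;
//   sequence[1.0] = 2.7;
//   auto model = noiseModel::Isotropic::Sigma(1, 0.1);
//   const size_t N = 3;
//   FitBasis<Chebyshev2> fit(sequence, model, N);
//   Vector parameters = fit.parameters(); // for Chebyshev2: function values at
//                                         // the N Chebyshev points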
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,112 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Fourier.h
|
||||
* @brief Fourier decomposition, see e.g.
|
||||
* http://mathworld.wolfram.com/FourierSeries.html
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @date July 4, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Fourier basis
|
||||
class GTSAM_EXPORT FourierBasis : public Basis<FourierBasis> {
|
||||
public:
|
||||
using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
|
||||
using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;
|
||||
|
||||
/**
|
||||
* @brief Evaluate Real Fourier Weights of size N in interval [a, b],
|
||||
* e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the weights.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x) {
|
||||
Weights b(N);
|
||||
b[0] = 1;
|
||||
for (size_t i = 1, n = 1; i < N; i++) {
|
||||
if (i % 2 == 1) {
|
||||
b[i] = cos(n * x);
|
||||
} else {
|
||||
b[i] = sin(n * x);
|
||||
n++;
|
||||
}
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Evaluate Real Fourier Weights of size N in interval [a, b],
|
||||
* e.g. N=5 yields bases: 1, cos(x), sin(x), cos(2*x), sin(2*x)
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the weights.
|
||||
* @param a Lower bound of interval.
|
||||
* @param b Upper bound of interval.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights CalculateWeights(size_t N, double x, double a, double b) {
|
||||
// TODO(Varun) How do we enforce an interval for Fourier series?
|
||||
return CalculateWeights(N, x);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute D = differentiation matrix.
|
||||
* Given coefficients c of a Fourier series c, D*c are the values of c'.
|
||||
*/
|
||||
static DiffMatrix DifferentiationMatrix(size_t N) {
|
||||
DiffMatrix D = DiffMatrix::Zero(N, N);
|
||||
double k = 1;
|
||||
for (size_t i = 1; i < N; i += 2) {
|
||||
D(i, i + 1) = k; // sin'(k*x) = k*cos(k*x)
|
||||
D(i + 1, i) = -k; // cos'(k*x) = -k*sin(k*x)
|
||||
k += 1;
|
||||
}
|
||||
|
||||
return D;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get weights at a given x that calculate the derivative.
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the derivative weights.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x) {
|
||||
return CalculateWeights(N, x) * DifferentiationMatrix(N);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get derivative weights at a given x that calculate the derivative,
|
||||
in the interval [a, b].
|
||||
*
|
||||
* @param N The degree of the polynomial to use.
|
||||
* @param x The point at which to compute the derivative weights.
|
||||
* @param a Lower bound of interval.
|
||||
* @param b Upper bound of interval.
|
||||
* @return Weights
|
||||
*/
|
||||
static Weights DerivativeWeights(size_t N, double x, double a, double b) {
|
||||
return CalculateWeights(N, x, a, b) * DifferentiationMatrix(N);
|
||||
}
|
||||
|
||||
}; // FourierBasis
|
||||
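// Example (sketch): evaluating a truncated Fourier series and its derivative;
// the coefficients are illustrative.
//
//   const size_t N = 5;                   // bases 1, cos x, sin x, cos 2x, sin 2x
//   Vector c = (Vector(5) << 1.0, 0.5, -0.5, 0.25, 0.0).finished();
//   const double x = 0.7;
//   double fx = (FourierBasis::CalculateWeights(N, x) * c)(0);
//   double dfx = (FourierBasis::DerivativeWeights(N, x) * c)(0);
//
// DerivativeWeights(N, x) equals CalculateWeights(N, x) * DifferentiationMatrix(N).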
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,215 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file ParameterMatrix.h
|
||||
* @brief Define ParameterMatrix class which is used to store values at
|
||||
* interpolation points.
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @date September 21, 2020
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* A matrix abstraction of MxN values at the Basis points.
|
||||
* This class serves as a wrapper over an Eigen matrix.
|
||||
* @tparam M: The dimension of the type you wish to evaluate.
|
||||
* @param N: the number of Basis points (e.g. Chebyshev points of the second
|
||||
* kind).
|
||||
*/
|
||||
template <int M>
|
||||
class ParameterMatrix {
|
||||
using MatrixType = Eigen::Matrix<double, M, -1>;
|
||||
|
||||
private:
|
||||
MatrixType matrix_;
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
enum { dimension = Eigen::Dynamic };
|
||||
|
||||
/**
|
||||
* Create ParameterMatrix using the number of basis points.
|
||||
* @param N: The number of basis points (the columns).
|
||||
*/
|
||||
ParameterMatrix(const size_t N) : matrix_(M, N) { matrix_.setZero(); }
|
||||
|
||||
/**
|
||||
* Create ParameterMatrix from an MxN Eigen Matrix.
|
||||
* @param matrix: An Eigen matrix used to initialize the ParameterMatrix.
|
||||
*/
|
||||
ParameterMatrix(const MatrixType& matrix) : matrix_(matrix) {}
|
||||
|
||||
/// Get the number of rows.
|
||||
size_t rows() const { return matrix_.rows(); }
|
||||
|
||||
/// Get the number of columns.
|
||||
size_t cols() const { return matrix_.cols(); }
|
||||
|
||||
/// Get the underlying matrix.
|
||||
MatrixType matrix() const { return matrix_; }
|
||||
|
||||
/// Return the transpose of the underlying matrix.
|
||||
Eigen::Matrix<double, -1, M> transpose() const { return matrix_.transpose(); }
|
||||
|
||||
/**
|
||||
* Get the matrix row specified by `index`.
|
||||
* @param index: The row index to retrieve.
|
||||
*/
|
||||
Eigen::Matrix<double, 1, -1> row(size_t index) const {
|
||||
return matrix_.row(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the matrix row specified by `index`.
|
||||
* @param index: The row index to set.
|
||||
*/
|
||||
auto row(size_t index) -> Eigen::Block<MatrixType, 1, -1, false> {
|
||||
return matrix_.row(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the matrix column specified by `index`.
|
||||
* @param index: The column index to retrieve.
|
||||
*/
|
||||
Eigen::Matrix<double, M, 1> col(size_t index) const {
|
||||
return matrix_.col(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the matrix column specified by `index`.
|
||||
* @param index: The column index to set.
|
||||
*/
|
||||
auto col(size_t index) -> Eigen::Block<MatrixType, M, 1, true> {
|
||||
return matrix_.col(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set all matrix coefficients to zero.
|
||||
*/
|
||||
void setZero() { matrix_.setZero(); }
|
||||
|
||||
/**
|
||||
* Add a ParameterMatrix to another.
|
||||
* @param other: ParameterMatrix to add.
|
||||
*/
|
||||
ParameterMatrix<M> operator+(const ParameterMatrix<M>& other) const {
|
||||
return ParameterMatrix<M>(matrix_ + other.matrix());
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an M*N-sized vector to the ParameterMatrix.
|
||||
* @param other: Vector which is reshaped and added.
|
||||
*/
|
||||
ParameterMatrix<M> operator+(
|
||||
const Eigen::Matrix<double, -1, 1>& other) const {
|
||||
// This form avoids a deep copy and instead typecasts `other`.
|
||||
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
|
||||
return ParameterMatrix<M>(matrix_ + other_);
|
||||
}
|
||||
|
||||
/**
|
||||
* Subtract a ParameterMatrix from another.
|
||||
* @param other: ParameterMatrix to subtract.
|
||||
*/
|
||||
ParameterMatrix<M> operator-(const ParameterMatrix<M>& other) const {
|
||||
return ParameterMatrix<M>(matrix_ - other.matrix());
|
||||
}
|
||||
|
||||
/**
|
||||
* Subtract an M*N-sized vector from the ParameterMatrix.
|
||||
* @param other: Vector which is reshaped and subtracted.
|
||||
*/
|
||||
ParameterMatrix<M> operator-(
|
||||
const Eigen::Matrix<double, -1, 1>& other) const {
|
||||
Eigen::Map<const MatrixType> other_(other.data(), M, cols());
|
||||
return ParameterMatrix<M>(matrix_ - other_);
|
||||
}
|
||||
|
||||
/**
|
||||
* Multiply ParameterMatrix with an Eigen matrix.
|
||||
* @param other: Eigen matrix which should be multiplication compatible with
|
||||
* the ParameterMatrix.
|
||||
*/
|
||||
MatrixType operator*(const Eigen::Matrix<double, -1, -1>& other) const {
|
||||
return matrix_ * other;
|
||||
}
|
||||
|
||||
/// @name Vector Space requirements, following LieMatrix
|
||||
/// @{
|
||||
|
||||
/**
|
||||
* Print the ParameterMatrix.
|
||||
* @param s: The string to prepend, for more contextual info.
|
||||
*/
|
||||
void print(const std::string& s = "") const {
|
||||
std::cout << (s == "" ? s : s + " ") << matrix_ << std::endl;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for equality up to absolute tolerance.
|
||||
* @param other: The ParameterMatrix to check equality with.
|
||||
* @param tol: The absolute tolerance threshold.
|
||||
*/
|
||||
bool equals(const ParameterMatrix<M>& other, double tol = 1e-8) const {
|
||||
return gtsam::equal_with_abs_tol(matrix_, other.matrix(), tol);
|
||||
}
|
||||
|
||||
/// Returns dimensionality of the tangent space
|
||||
inline size_t dim() const { return matrix_.size(); }
|
||||
|
||||
/// Convert to vector form; the flattening is done row-wise.
|
||||
inline Vector vector() const {
|
||||
using RowMajor = Eigen::Matrix<double, -1, -1, Eigen::RowMajor>;
|
||||
Vector result(matrix_.size());
|
||||
Eigen::Map<RowMajor>(&result(0), rows(), cols()) = matrix_;
|
||||
return result;
|
||||
}
|
||||
|
||||
/** Identity function to satisfy VectorSpace traits.
|
||||
*
|
||||
* NOTE: The size at compile time is unknown so this identity is zero
|
||||
* length and thus not valid.
|
||||
*/
|
||||
inline static ParameterMatrix identity() {
|
||||
// throw std::runtime_error(
|
||||
// "ParameterMatrix::identity(): Don't use this function");
|
||||
return ParameterMatrix(0);
|
||||
}
|
||||
|
||||
/// @}
|
||||
};
|
||||
|
||||
// traits for ParameterMatrix
|
||||
template <int M>
|
||||
struct traits<ParameterMatrix<M>>
|
||||
: public internal::VectorSpace<ParameterMatrix<M>> {};
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Stream operator that takes a ParameterMatrix. Used for printing.
|
||||
template <int M>
|
||||
inline std::ostream& operator<<(std::ostream& os,
|
||||
const ParameterMatrix<M>& parameterMatrix) {
|
||||
os << parameterMatrix.matrix();
|
||||
return os;
|
||||
}
|
||||
|
||||
} // namespace gtsam
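
A minimal standalone sketch of the Eigen::Map trick used by ParameterMatrix::operator+ and operator- above: the flat M*N vector is reinterpreted in place as an M-by-N column-major matrix (Eigen's default storage order), so no temporary copy is made. The sizes and values below are made up purely for illustration.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // A flat buffer of 6 doubles, standing in for the M*N tangent vector.
  Eigen::VectorXd flat(6);
  flat << 1, 2, 3, 4, 5, 6;

  // Reinterpret the same memory as a 2x3 column-major matrix, without copying:
  // column 0 = (1, 2), column 1 = (3, 4), column 2 = (5, 6).
  Eigen::Map<const Eigen::Matrix<double, 2, 3>> reshaped(flat.data());
  std::cout << reshaped << std::endl;  // prints: 1 3 5 / 2 4 6
  return 0;
}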
|
|
@ -0,0 +1,146 @@
|
|||
//*************************************************************************
|
||||
// basis
|
||||
//*************************************************************************
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
// TODO(gerry): add all the Functors to the Basis interfaces, e.g.
|
||||
// `EvaluationFunctor`
|
||||
|
||||
#include <gtsam/basis/Fourier.h>
|
||||
|
||||
class FourierBasis {
|
||||
static Vector CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector x);
|
||||
|
||||
static Matrix DifferentiationMatrix(size_t N);
|
||||
static Vector DerivativeWeights(size_t N, double x);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/Chebyshev.h>
|
||||
|
||||
class Chebyshev1Basis {
|
||||
static Matrix CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector X);
|
||||
};
|
||||
|
||||
class Chebyshev2Basis {
|
||||
static Matrix CalculateWeights(size_t N, double x);
|
||||
static Matrix WeightMatrix(size_t N, Vector x);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
class Chebyshev2 {
|
||||
static double Point(size_t N, int j);
|
||||
static double Point(size_t N, int j, double a, double b);
|
||||
|
||||
static Vector Points(size_t N);
|
||||
static Vector Points(size_t N, double a, double b);
|
||||
|
||||
static Matrix WeightMatrix(size_t N, Vector X);
|
||||
static Matrix WeightMatrix(size_t N, Vector X, double a, double b);
|
||||
|
||||
static Matrix CalculateWeights(size_t N, double x, double a, double b);
|
||||
static Matrix DerivativeWeights(size_t N, double x, double a, double b);
|
||||
static Matrix IntegrationWeights(size_t N, double a, double b);
|
||||
static Matrix DifferentiationMatrix(size_t N, double a, double b);
|
||||
|
||||
// TODO Needs OptionalJacobian
|
||||
// static double Derivative(double x, Vector f);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
||||
template <M = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}>
|
||||
class ParameterMatrix {
|
||||
ParameterMatrix(const size_t N);
|
||||
ParameterMatrix(const Matrix& matrix);
|
||||
|
||||
Matrix matrix() const;
|
||||
|
||||
void print(const string& s = "") const;
|
||||
};
|
||||
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
|
||||
template <BASIS = {gtsam::Chebyshev2, gtsam::Chebyshev1Basis,
|
||||
gtsam::Chebyshev2Basis, gtsam::FourierBasis}>
|
||||
virtual class EvaluationFactor : gtsam::NoiseModelFactor {
|
||||
EvaluationFactor();
|
||||
EvaluationFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
EvaluationFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
template <BASIS, M>
|
||||
virtual class VectorEvaluationFactor : gtsam::NoiseModelFactor {
|
||||
VectorEvaluationFactor();
|
||||
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
VectorEvaluationFactor(gtsam::Key key, const Vector& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
// TODO(Varun) Better way to support arbitrary dimensions?
|
||||
// Especially if users mainly do `pip install gtsam` for the Python wrapper.
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 3>
|
||||
VectorEvaluationFactorChebyshev2D3;
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 4>
|
||||
VectorEvaluationFactorChebyshev2D4;
|
||||
typedef gtsam::VectorEvaluationFactor<gtsam::Chebyshev2, 12>
|
||||
VectorEvaluationFactorChebyshev2D12;
|
||||
|
||||
template <BASIS, P>
|
||||
virtual class VectorComponentFactor : gtsam::NoiseModelFactor {
|
||||
VectorComponentFactor();
|
||||
VectorComponentFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
size_t i, double x);
|
||||
VectorComponentFactor(gtsam::Key key, const double z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
size_t i, double x, double a, double b);
|
||||
};
|
||||
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 3>
|
||||
VectorComponentFactorChebyshev2D3;
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 4>
|
||||
VectorComponentFactorChebyshev2D4;
|
||||
typedef gtsam::VectorComponentFactor<gtsam::Chebyshev2, 12>
|
||||
VectorComponentFactorChebyshev2D12;
|
||||
|
||||
template <BASIS, T>
|
||||
virtual class ManifoldEvaluationFactor : gtsam::NoiseModelFactor {
|
||||
ManifoldEvaluationFactor();
|
||||
ManifoldEvaluationFactor(gtsam::Key key, const T& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x);
|
||||
ManifoldEvaluationFactor(gtsam::Key key, const T& z,
|
||||
const gtsam::noiseModel::Base* model, const size_t N,
|
||||
double x, double a, double b);
|
||||
};
|
||||
|
||||
// TODO(gerry): Add `DerivativeFactor`, `VectorDerivativeFactor`, and
|
||||
// `ComponentDerivativeFactor`
|
||||
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
template <BASIS = {gtsam::FourierBasis, gtsam::Chebyshev1Basis,
|
||||
gtsam::Chebyshev2Basis, gtsam::Chebyshev2}>
|
||||
class FitBasis {
|
||||
FitBasis(const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
|
||||
static gtsam::NonlinearFactorGraph NonlinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
Parameters parameters() const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1 @@
|
|||
gtsamAddTestsGlob(basis "test*.cpp" "" "gtsam")
|
|
@ -0,0 +1,236 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testChebyshev.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal
|
||||
* @brief Unit tests for Chebyshev Basis Decompositions
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/Chebyshev.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
|
||||
const size_t N = 3;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Chebyshev1) {
|
||||
using Synth = Chebyshev1Basis::EvaluationFunctor;
|
||||
Vector c(N);
|
||||
double x;
|
||||
c << 12, 3, 1;
|
||||
x = -1.0;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = -0.5;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = 0.3;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 3 * x + 2 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Chebyshev2) {
|
||||
using Synth = Chebyshev2Basis::EvaluationFunctor;
|
||||
Vector c(N);
|
||||
double x;
|
||||
c << 12, 3, 1;
|
||||
x = -1.0;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = -0.5;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
x = 0.3;
|
||||
EXPECT_DOUBLES_EQUAL(12 + 6 * x + 4 * x * x - 1, Synth(N, x)(c), 1e-9);
|
||||
}
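
As a worked check (not part of the test file), the expected expressions in the two regressions above are simply expansions in the first few Chebyshev polynomials, assuming the standard definitions of the first and second kind:

  T0(x) = 1,  T1(x) = x,   T2(x) = 2x^2 - 1   =>  12*T0 + 3*T1 + 1*T2 = 12 + 3x + 2x^2 - 1
  U0(x) = 1,  U1(x) = 2x,  U2(x) = 4x^2 - 1   =>  12*U0 + 3*U1 + 1*U2 = 12 + 6x + 4x^2 - 1

which matches the coefficient vector c = (12, 3, 1) used in both tests.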
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Evaluation) {
|
||||
Chebyshev1Basis::EvaluationFunctor fx(N, 0.5);
|
||||
Vector c(N);
|
||||
c << 3, 5, -12;
|
||||
EXPECT_DOUBLES_EQUAL(11.5, fx(c), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
|
||||
#include <gtsam/nonlinear/Marginals.h>
|
||||
TEST(Chebyshev, Expression) {
|
||||
// Create linear factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
Key key(1);
|
||||
|
||||
// Let's pretend we have 6 GPS measurements (we just do x coordinate)
|
||||
// at times
|
||||
const size_t m = 6;
|
||||
Vector t(m);
|
||||
t << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;
|
||||
Vector x(m);
|
||||
x << -0.7, -0.4, 0.1, 0.3, 0.7, 0.9;
|
||||
|
||||
for (size_t i = 0; i < m; i++) {
|
||||
graph.emplace_shared<EvaluationFactor<Chebyshev1Basis>>(key, x(i), model, N,
|
||||
t(i));
|
||||
}
|
||||
|
||||
// Solve
|
||||
Values initial;
|
||||
initial.insert<Vector>(key, Vector::Zero(N)); // initial does not matter
|
||||
|
||||
// ... and optimize
|
||||
GaussNewtonParams parameters;
|
||||
GaussNewtonOptimizer optimizer(graph, initial, parameters);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
// Check
|
||||
Vector expected(N);
|
||||
expected << 0, 1, 0;
|
||||
Vector actual_c = result.at<Vector>(key);
|
||||
EXPECT(assert_equal(expected, actual_c));
|
||||
|
||||
// Calculate and print covariances
|
||||
Marginals marginals(graph, result);
|
||||
Matrix3 cov = marginals.marginalCovariance(key);
|
||||
EXPECT_DOUBLES_EQUAL(0.626, cov(1, 1), 1e-3);
|
||||
|
||||
// Predict x at time 1.0
|
||||
Chebyshev1Basis::EvaluationFunctor f(N, 1.0);
|
||||
Matrix H;
|
||||
double actual = f(actual_c, H);
|
||||
EXPECT_DOUBLES_EQUAL(1.0, actual, 1e-9);
|
||||
|
||||
// Calculate predictive variance on prediction
|
||||
double actual_variance_on_prediction = (H * cov * H.transpose())(0);
|
||||
EXPECT_DOUBLES_EQUAL(1.1494, actual_variance_on_prediction, 1e-4);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev, Decomposition) {
|
||||
const size_t M = 16;
|
||||
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < M; i++) {
|
||||
double x = ((double)i / M); // - 0.99;
|
||||
double y = x;
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Chebyshev Decomposition
|
||||
FitBasis<Chebyshev1Basis> actual(sequence, model, N);
|
||||
|
||||
// Check
|
||||
Vector expected = Vector::Zero(N);
|
||||
expected(1) = 1;
|
||||
EXPECT(assert_equal(expected, (Vector)actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev1, Derivative) {
|
||||
Vector c(N);
|
||||
c << 12, 3, 2;
|
||||
|
||||
Weights D;
|
||||
|
||||
double x = -1.0;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(-5, (D * c)(0), 1e-9);
|
||||
|
||||
x = -0.5;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(-1, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.3;
|
||||
D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(5.4, (D * c)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
Vector3 f(-6, 1, 0.5);
|
||||
|
||||
double proxy1(double x, size_t N) {
|
||||
return Chebyshev1Basis::EvaluationFunctor(N, x)(Vector(f));
|
||||
}
|
||||
|
||||
TEST(Chebyshev1, Derivative2) {
|
||||
const double x = 0.5;
|
||||
auto D = Chebyshev1Basis::DerivativeWeights(N, x);
|
||||
|
||||
Matrix numeric_dTdx =
|
||||
numericalDerivative21<double, double, double>(proxy1, x, N);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(2, numeric_dTdx(0, 0), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(2, (D * f)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Derivative) {
|
||||
Vector c(N);
|
||||
c << 12, 6, 2;
|
||||
|
||||
Weights D;
|
||||
|
||||
double x = -1.0;
|
||||
CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);
|
||||
x = 1.0;
|
||||
CHECK_EXCEPTION(Chebyshev2Basis::DerivativeWeights(N, x), std::runtime_error);
|
||||
|
||||
x = -0.5;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(4, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.3;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(16.8, (D * c)(0), 1e-9);
|
||||
|
||||
x = 0.75;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(24, (D * c)(0), 1e-9);
|
||||
|
||||
x = 10;
|
||||
D = Chebyshev2Basis::DerivativeWeights(N, x, 0, 20);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(12, (D * c)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
double proxy2(double x, size_t N) {
|
||||
return Chebyshev2Basis::EvaluationFunctor(N, x)(Vector(f));
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative2) {
|
||||
const double x = 0.5;
|
||||
auto D = Chebyshev2Basis::DerivativeWeights(N, x);
|
||||
|
||||
Matrix numeric_dTdx =
|
||||
numericalDerivative21<double, double, double>(proxy2, x, N);
|
||||
// regression
|
||||
EXPECT_DOUBLES_EQUAL(4, numeric_dTdx(0, 0), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(4, (D * f)(0), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@ -0,0 +1,435 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testChebyshev.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Varun Agrawal
|
||||
* @brief Unit tests for Chebyshev Basis Decompositions via pseudo-spectral
|
||||
* methods
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
using namespace boost::placeholders;
|
||||
|
||||
noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);
|
||||
|
||||
const size_t N = 32;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Point) {
|
||||
static const int N = 5;
|
||||
auto points = Chebyshev2::Points(N);
|
||||
Vector expected(N);
|
||||
expected << -1., -sqrt(2.) / 2., 0., sqrt(2.) / 2., 1.;
|
||||
static const double tol = 1e-15; // changing this reveals errors
|
||||
EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);
|
||||
|
||||
// Check symmetry
|
||||
EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 0), -Chebyshev2::Point(N, 4), tol);
|
||||
EXPECT_DOUBLES_EQUAL(Chebyshev2::Point(N, 1), -Chebyshev2::Point(N, 3), tol);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, PointInInterval) {
|
||||
static const int N = 5;
|
||||
auto points = Chebyshev2::Points(N, 0, 20);
|
||||
Vector expected(N);
|
||||
expected << 0., 1. - sqrt(2.) / 2., 1., 1. + sqrt(2.) / 2., 2.;
|
||||
expected *= 10.0;
|
||||
static const double tol = 1e-15; // changing this reveals errors
|
||||
EXPECT_DOUBLES_EQUAL(expected(0), points(0), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(1), points(1), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(2), points(2), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(3), points(3), tol);
|
||||
EXPECT_DOUBLES_EQUAL(expected(4), points(4), tol);
|
||||
|
||||
// all at once
|
||||
Vector actual = Chebyshev2::Points(N, 0, 20);
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
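
For reference (assuming the usual Chebyshev-Gauss-Lobatto construction, with which the values above are consistent), the N points on [-1, 1] are x_j = cos(j*pi/(N-1)), j = 0..N-1, up to the ordering convention of Chebyshev2::Point; for N = 5 that gives {-1, -sqrt(2)/2, 0, sqrt(2)/2, 1}. Mapping to an interval [a, b] is the affine change of variables x -> a + (b - a)*(x + 1)/2, which for [0, 20] reproduces the expected vector 10*(x + 1) checked in PointInInterval.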
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{-1, 4}, {0, 2}, {1, 6}}, 0.5]
|
||||
TEST(Chebyshev2, Interpolate2) {
|
||||
size_t N = 3;
|
||||
Chebyshev2::EvaluationFunctor fx(N, 0.5);
|
||||
Vector f(N);
|
||||
f << 4, 2, 6;
|
||||
EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{0, 4}, {1, 2}, {2, 6}}, 1.5]
|
||||
TEST(Chebyshev2, Interpolate2_Interval) {
|
||||
Chebyshev2::EvaluationFunctor fx(3, 1.5, 0, 2);
|
||||
Vector3 f(4, 2, 6);
|
||||
EXPECT_DOUBLES_EQUAL(3.25, fx(f), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// InterpolatingPolynomial[{{-1, 4}, {-Sqrt[2]/2, 2}, {0, 6}, {Sqrt[2]/2,3}, {1,
|
||||
// 3}}, 0.5]
|
||||
TEST(Chebyshev2, Interpolate5) {
|
||||
Chebyshev2::EvaluationFunctor fx(5, 0.5);
|
||||
Vector f(5);
|
||||
f << 4, 2, 6, 3, 3;
|
||||
EXPECT_DOUBLES_EQUAL(4.34283, fx(f), 1e-5);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Interpolating vectors
|
||||
TEST(Chebyshev2, InterpolateVector) {
|
||||
double t = 30, a = 0, b = 100;
|
||||
const size_t N = 3;
|
||||
// Create 2x3 matrix with Vectors at Chebyshev points
|
||||
ParameterMatrix<2> X(N);
|
||||
X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp
|
||||
|
||||
// Check value
|
||||
Vector expected(2);
|
||||
expected << t, 0;
|
||||
Eigen::Matrix<double, /*2x2N*/ -1, -1> actualH(2, 2 * N);
|
||||
|
||||
Chebyshev2::VectorEvaluationFunctor<2> fx(N, t, a, b);
|
||||
EXPECT(assert_equal(expected, fx(X, actualH), 1e-9));
|
||||
|
||||
// Check derivative
|
||||
boost::function<Vector2(ParameterMatrix<2>)> f = boost::bind(
|
||||
&Chebyshev2::VectorEvaluationFunctor<2>::operator(), fx, _1, boost::none);
|
||||
Matrix numericalH =
|
||||
numericalDerivative11<Vector2, ParameterMatrix<2>, 2 * N>(f, X);
|
||||
EXPECT(assert_equal(numericalH, actualH, 1e-9));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, Decomposition) {
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = (double)i / 16. - 0.99, y = x;
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Chebyshev Decomposition
|
||||
FitBasis<Chebyshev2> actual(sequence, model, 3);
|
||||
|
||||
// Check
|
||||
Vector expected(3);
|
||||
expected << -1, 0, 1;
|
||||
EXPECT(assert_equal(expected, actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, DifferentiationMatrix3) {
|
||||
// Trefethen00book, p.55
|
||||
const size_t N = 3;
|
||||
Matrix expected(N, N);
|
||||
// Differentiation matrix computed from Chebfun
|
||||
expected << 1.5000, -2.0000, 0.5000, //
|
||||
0.5000, -0.0000, -0.5000, //
|
||||
-0.5000, 2.0000, -1.5000;
|
||||
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
|
||||
// This was verified with chebfun
|
||||
expected = -expected;
|
||||
|
||||
Matrix actual = Chebyshev2::DifferentiationMatrix(N);
|
||||
EXPECT(assert_equal(expected, actual, 1e-4));
|
||||
}
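
The sign flip applied above has a simple explanation (a note on the convention, not a change to the test): the standard Chebyshev differentiation matrix is anti-centrosymmetric, D(i, j) = -D(N-1-i, N-1-j), so reversing the node ordering (GTSAM orders points from -1 to +1, Trefethen from +1 to -1) conjugates D by the flip permutation, which is equivalent to negating it.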
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, DerivativeMatrix6) {
|
||||
// Trefethen00book, p.55
|
||||
const size_t N = 6;
|
||||
Matrix expected(N, N);
|
||||
expected << 8.5000, -10.4721, 2.8944, -1.5279, 1.1056, -0.5000, //
|
||||
2.6180, -1.1708, -2.0000, 0.8944, -0.6180, 0.2764, //
|
||||
-0.7236, 2.0000, -0.1708, -1.6180, 0.8944, -0.3820, //
|
||||
0.3820, -0.8944, 1.6180, 0.1708, -2.0000, 0.7236, //
|
||||
-0.2764, 0.6180, -0.8944, 2.0000, 1.1708, -2.6180, //
|
||||
0.5000, -1.1056, 1.5279, -2.8944, 10.4721, -8.5000;
|
||||
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
|
||||
// This was verified with chebfun
|
||||
expected = -expected;
|
||||
|
||||
Matrix actual = Chebyshev2::DifferentiationMatrix(N);
|
||||
EXPECT(assert_equal((Matrix)expected, actual, 1e-4));
|
||||
}
|
||||
|
||||
// test function for CalculateWeights and DerivativeWeights
|
||||
double f(double x) {
|
||||
// return 3*(x**3) - 2*(x**2) + 5*x - 11
|
||||
return 3.0 * pow(x, 3) - 2.0 * pow(x, 2) + 5.0 * x - 11;
|
||||
}
|
||||
|
||||
// its derivative
|
||||
double fprime(double x) {
|
||||
// return 9*(x**2) - 4*(x) + 5
|
||||
return 9.0 * pow(x, 2) - 4.0 * x + 5.0;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, CalculateWeights) {
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i));
|
||||
}
|
||||
double x1 = 0.7, x2 = -0.376;
|
||||
Weights weights1 = Chebyshev2::CalculateWeights(N, x1);
|
||||
Weights weights2 = Chebyshev2::CalculateWeights(N, x2);
|
||||
EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);
|
||||
EXPECT_DOUBLES_EQUAL(f(x2), weights2 * fvals, 1e-8);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, CalculateWeights2) {
|
||||
double a = 0, b = 10, x1 = 7, x2 = 4.12;
|
||||
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i, a, b));
|
||||
}
|
||||
|
||||
Weights weights1 = Chebyshev2::CalculateWeights(N, x1, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(f(x1), weights1 * fvals, 1e-8);
|
||||
|
||||
Weights weights2 = Chebyshev2::CalculateWeights(N, x2, a, b);
|
||||
double expected2 = f(x2); // 185.454784
|
||||
double actual2 = weights2 * fvals;
|
||||
EXPECT_DOUBLES_EQUAL(expected2, actual2, 1e-8);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, DerivativeWeights) {
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i));
|
||||
}
|
||||
double x1 = 0.7, x2 = -0.376, x3 = 0.0;
|
||||
Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-9);
|
||||
|
||||
Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-9);
|
||||
|
||||
Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-9);
|
||||
|
||||
  // test that the derivative is also correct when evaluated at a Chebyshev point
|
||||
double x4 = Chebyshev2::Point(N, 3);
|
||||
Weights dWeights4 = Chebyshev2::DerivativeWeights(N, x4);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x4), dWeights4 * fvals, 1e-9);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, DerivativeWeights2) {
|
||||
double x1 = 5, x2 = 4.12, a = 0, b = 10;
|
||||
|
||||
Eigen::Matrix<double, -1, 1> fvals(N);
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
fvals(i) = f(Chebyshev2::Point(N, i, a, b));
|
||||
}
|
||||
|
||||
Weights dWeights1 = Chebyshev2::DerivativeWeights(N, x1, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x1), dWeights1 * fvals, 1e-8);
|
||||
|
||||
Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-8);
|
||||
|
||||
  // test that the derivative is also correct when evaluated at a Chebyshev point
|
||||
double x3 = Chebyshev2::Point(N, 3, a, b);
|
||||
Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3, a, b);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check two different ways to calculate the derivative weights
|
||||
TEST(Chebyshev2, DerivativeWeightsDifferentiationMatrix) {
|
||||
const size_t N6 = 6;
|
||||
double x1 = 0.311;
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
|
||||
Weights expected = Chebyshev2::CalculateWeights(N6, x1) * D6;
|
||||
Weights actual = Chebyshev2::DerivativeWeights(N6, x1);
|
||||
EXPECT(assert_equal(expected, actual, 1e-12));
|
||||
|
||||
double a = -3, b = 8, x2 = 5.05;
|
||||
Matrix D6_2 = Chebyshev2::DifferentiationMatrix(N6, a, b);
|
||||
Weights expected1 = Chebyshev2::CalculateWeights(N6, x2, a, b) * D6_2;
|
||||
Weights actual1 = Chebyshev2::DerivativeWeights(N6, x2, a, b);
|
||||
EXPECT(assert_equal(expected1, actual1, 1e-12));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check that the differentiation matrix maps a slope-1 ramp to a vector of ones
|
||||
TEST(Chebyshev2, DerivativeWeights6) {
|
||||
const size_t N6 = 6;
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(N6);
|
||||
Chebyshev2::Parameters x = Chebyshev2::Points(N6); // ramp with slope 1
|
||||
EXPECT(assert_equal(Vector::Ones(N6), Vector(D6 * x)));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check that the differentiation matrix maps a slope-1 ramp to a vector of ones (N=7)
|
||||
TEST(Chebyshev2, DerivativeWeights7) {
|
||||
const size_t N7 = 7;
|
||||
Matrix D7 = Chebyshev2::DifferentiationMatrix(N7);
|
||||
Chebyshev2::Parameters x = Chebyshev2::Points(N7); // ramp with slope 1
|
||||
EXPECT(assert_equal(Vector::Ones(N7), Vector(D7 * x)));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check derivative in two different ways: numerical and using D on f
|
||||
Vector6 f3_at_6points = (Vector6() << 4, 2, 6, 2, 4, 3).finished();
|
||||
double proxy3(double x) {
|
||||
return Chebyshev2::EvaluationFunctor(6, x)(f3_at_6points);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative6) {
|
||||
// Check Derivative evaluation at point x=0.2
|
||||
|
||||
// calculate expected values by numerical derivative of synthesis
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy3, x);
|
||||
|
||||
  // Calculate derivatives at Chebyshev points using D6, interpolate
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(6);
|
||||
Vector derivative_at_points = D6 * f3_at_6points;
|
||||
Chebyshev2::EvaluationFunctor fx(6, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);
|
||||
|
||||
// Do directly
|
||||
Chebyshev2::DerivativeFunctor dfdx(6, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Assert that derivative also works in non-standard interval [0,3]
|
||||
double proxy4(double x) {
|
||||
return Chebyshev2::EvaluationFunctor(6, x, 0, 3)(f3_at_6points);
|
||||
}
|
||||
|
||||
TEST(Chebyshev2, Derivative6_03) {
|
||||
// Check Derivative evaluation at point x=0.2, in interval [0,3]
|
||||
|
||||
// calculate expected values by numerical derivative of synthesis
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy4, x);
|
||||
|
||||
  // Calculate derivatives at Chebyshev points using D6, interpolate
|
||||
Matrix D6 = Chebyshev2::DifferentiationMatrix(6, 0, 3);
|
||||
Vector derivative_at_points = D6 * f3_at_6points;
|
||||
Chebyshev2::EvaluationFunctor fx(6, x, 0, 3);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivative_at_points), 1e-8);
|
||||
|
||||
// Do directly
|
||||
Chebyshev2::DerivativeFunctor dfdx(6, x, 0, 3);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(f3_at_6points), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test VectorDerivativeFunctor
|
||||
TEST(Chebyshev2, VectorDerivativeFunctor) {
|
||||
const size_t N = 3, M = 2;
|
||||
const double x = 0.2;
|
||||
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
|
||||
VecD fx(N, x, 0, 3);
|
||||
ParameterMatrix<M> X(N);
|
||||
Matrix actualH(M, M * N);
|
||||
EXPECT(assert_equal(Vector::Zero(M), (Vector)fx(X, actualH), 1e-8));
|
||||
|
||||
// Test Jacobian
|
||||
Matrix expectedH = numericalDerivative11<Vector2, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&VecD::operator(), fx, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test VectorDerivativeFunctor with polynomial function
|
||||
TEST(Chebyshev2, VectorDerivativeFunctor2) {
|
||||
const size_t N = 64, M = 1, T = 15;
|
||||
using VecD = Chebyshev2::VectorDerivativeFunctor<M>;
|
||||
|
||||
const Vector points = Chebyshev2::Points(N, 0, T);
|
||||
|
||||
// Assign the parameter matrix
|
||||
Vector values(N);
|
||||
for (size_t i = 0; i < N; ++i) {
|
||||
values(i) = f(points(i));
|
||||
}
|
||||
ParameterMatrix<M> X(values);
|
||||
|
||||
// Evaluate the derivative at the chebyshev points using
|
||||
// VectorDerivativeFunctor.
|
||||
for (size_t i = 0; i < N; ++i) {
|
||||
VecD d(N, points(i), 0, T);
|
||||
Vector1 Dx = d(X);
|
||||
EXPECT_DOUBLES_EQUAL(fprime(points(i)), Dx(0), 1e-6);
|
||||
}
|
||||
|
||||
// Test Jacobian at the first chebyshev point.
|
||||
Matrix actualH(M, M * N);
|
||||
VecD vecd(N, points(0), 0, T);
|
||||
vecd(X, actualH);
|
||||
Matrix expectedH = numericalDerivative11<Vector1, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&VecD::operator(), vecd, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-6));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Test ComponentDerivativeFunctor
|
||||
TEST(Chebyshev2, ComponentDerivativeFunctor) {
|
||||
const size_t N = 6, M = 2;
|
||||
const double x = 0.2;
|
||||
using CompFunc = Chebyshev2::ComponentDerivativeFunctor<M>;
|
||||
size_t row = 1;
|
||||
CompFunc fx(N, row, x, 0, 3);
|
||||
ParameterMatrix<M> X(N);
|
||||
Matrix actualH(1, M * N);
|
||||
EXPECT_DOUBLES_EQUAL(0, fx(X, actualH), 1e-8);
|
||||
|
||||
Matrix expectedH = numericalDerivative11<double, ParameterMatrix<M>, M * N>(
|
||||
boost::bind(&CompFunc::operator(), fx, _1, boost::none), X);
|
||||
EXPECT(assert_equal(expectedH, actualH, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Chebyshev2, IntegralWeights) {
|
||||
const size_t N7 = 7;
|
||||
Vector actual = Chebyshev2::IntegrationWeights(N7);
|
||||
Vector expected = (Vector(N7) << 0.0285714285714286, 0.253968253968254,
|
||||
0.457142857142857, 0.520634920634921, 0.457142857142857,
|
||||
0.253968253968254, 0.0285714285714286)
|
||||
.finished();
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
const size_t N8 = 8;
|
||||
Vector actual2 = Chebyshev2::IntegrationWeights(N8);
|
||||
Vector expected2 = (Vector(N8) << 0.0204081632653061, 0.190141007218208,
|
||||
0.352242423718159, 0.437208405798326, 0.437208405798326,
|
||||
0.352242423718159, 0.190141007218208, 0.0204081632653061)
|
||||
.finished();
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
}
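
A quick sanity check on the regression values above: each weight vector sums to 2 (e.g., 2*(0.0285714 + 0.253968 + 0.457143) + 0.520635 = 2.0000), i.e., the rule integrates the constant function 1 exactly over [-1, 1]. The endpoint weights also match the classical Clenshaw-Curtis values 1/(n^2 - 1) for even n and 1/n^2 for odd n, with n = N - 1 (1/35 for N = 7, 1/49 for N = 8), so these appear to be Clenshaw-Curtis quadrature weights at the Chebyshev-Gauss-Lobatto nodes.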
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@ -0,0 +1,254 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testFourier.cpp
|
||||
* @date July 4, 2020
|
||||
* @author Frank Dellaert, Varun Agrawal
|
||||
* @brief Unit tests for Fourier Basis Decompositions with Expressions
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/FitBasis.h>
|
||||
#include <gtsam/basis/Fourier.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
auto model = noiseModel::Unit::Create(1);
|
||||
|
||||
// Coefficients for testing, respectively 3 and 7 parameter Fourier basis.
|
||||
// They correspond to best approximation of TestFunction(x)
|
||||
const Vector k3Coefficients = (Vector3() << 1.5661, 1.2717, 1.2717).finished();
|
||||
const Vector7 k7Coefficients =
|
||||
(Vector7() << 1.5661, 1.2717, 1.2717, -0.0000, 0.5887, -0.0943, 0.0943)
|
||||
.finished();
|
||||
|
||||
// The test-function used below
|
||||
static double TestFunction(double x) { return exp(sin(x) + cos(x)); }
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, BasisEvaluationFunctor) {
|
||||
// fx(0) takes coefficients c to calculate the value f(c;x==0)
|
||||
FourierBasis::EvaluationFunctor fx(3, 0);
|
||||
EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
|
||||
fx(k3Coefficients), 1e-9);
|
||||
}
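
For a 3-parameter Fourier basis, the approximated function is f(x) = c0 + c1*cos(x) + c2*sin(x) (this is the convention implied by the WeightMatrix test and the manual Jacobian rows A = [1, cos(x), sin(x)] further below). At x = 0 this reduces to c0 + c1, which is exactly the value checked above, and its Jacobian w.r.t. the coefficients is (1, cos 0, sin 0) = (1, 1, 0), matching expectedH in the derivative test that follows.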
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, BasisEvaluationFunctorDerivative) {
|
||||
  // If we give the H argument, we get the derivative of fx(0) w.r.t. the coefficients
|
||||
// Needs to be Matrix so it can be used by OptionalJacobian.
|
||||
Matrix H(1, 3);
|
||||
FourierBasis::EvaluationFunctor fx(3, 0);
|
||||
EXPECT_DOUBLES_EQUAL(k3Coefficients[0] + k3Coefficients[1],
|
||||
fx(k3Coefficients, H), 1e-9);
|
||||
|
||||
Matrix13 expectedH(1, 1, 0);
|
||||
EXPECT(assert_equal(expectedH, H));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, Manual) {
|
||||
const size_t N = 3;
|
||||
|
||||
// We will create a linear factor graph
|
||||
GaussianFactorGraph graph;
|
||||
|
||||
// We create an unknown Vector expression for the coefficients
|
||||
Key key(1);
|
||||
|
||||
// We will need values below to test the Jacobians
|
||||
Values values;
|
||||
values.insert<Vector>(key, Vector::Zero(N)); // value does not really matter
|
||||
|
||||
  // At 16 different sample points x, check the Predict_ expression
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
const double x = i * M_PI / 8;
|
||||
const double desiredValue = TestFunction(x);
|
||||
|
||||
// Manual JacobianFactor
|
||||
Matrix A(1, N);
|
||||
A << 1, cos(x), sin(x);
|
||||
Vector b(1);
|
||||
b << desiredValue;
|
||||
JacobianFactor linearFactor(key, A, b);
|
||||
graph.add(linearFactor);
|
||||
|
||||
// Create factor to predict value at x
|
||||
EvaluationFactor<FourierBasis> predictFactor(key, desiredValue, model, N,
|
||||
x);
|
||||
|
||||
// Check expression Jacobians
|
||||
EXPECT_CORRECT_FACTOR_JACOBIANS(predictFactor, values, 1e-5, 1e-9);
|
||||
|
||||
auto linearizedFactor = predictFactor.linearize(values);
|
||||
auto linearizedJacobianFactor =
|
||||
boost::dynamic_pointer_cast<JacobianFactor>(linearizedFactor);
|
||||
CHECK(linearizedJacobianFactor); // makes sure it's indeed a JacobianFactor
|
||||
EXPECT(assert_equal(linearFactor, *linearizedJacobianFactor, 1e-9));
|
||||
}
|
||||
|
||||
// Solve linear graph
|
||||
VectorValues actual = graph.optimize();
|
||||
EXPECT(assert_equal((Vector)k3Coefficients, actual.at(key), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, EvaluationFactor) {
|
||||
// Check fitting a function with a 7-parameter Fourier basis
|
||||
|
||||
// Create linear factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
Key key(1);
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = i * M_PI / 8, desiredValue = TestFunction(x);
|
||||
graph.emplace_shared<EvaluationFactor<FourierBasis>>(key, desiredValue,
|
||||
model, 7, x);
|
||||
}
|
||||
|
||||
// Solve FourierFactorGraph
|
||||
Values values;
|
||||
values.insert<Vector>(key, Vector::Zero(7));
|
||||
GaussianFactorGraph::shared_ptr lfg = graph.linearize(values);
|
||||
VectorValues actual = lfg->optimize();
|
||||
|
||||
EXPECT(assert_equal((Vector)k7Coefficients, actual.at(key), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, WeightMatrix) {
|
||||
// The WeightMatrix creates an m*n matrix, where m is the number of sample
|
||||
// points, and n is the number of parameters.
|
||||
Matrix expected(2, 3);
|
||||
expected.row(0) << 1, cos(1), sin(1);
|
||||
expected.row(1) << 1, cos(2), sin(2);
|
||||
Vector2 X(1, 2);
|
||||
Matrix actual = FourierBasis::WeightMatrix(3, X);
|
||||
EXPECT(assert_equal(expected, actual, 1e-9));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, Decomposition) {
|
||||
// Create example sequence
|
||||
Sequence sequence;
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
double x = i * M_PI / 8, y = TestFunction(x);
|
||||
sequence[x] = y;
|
||||
}
|
||||
|
||||
// Do Fourier Decomposition
|
||||
FitBasis<FourierBasis> actual(sequence, model, 3);
|
||||
|
||||
// Check
|
||||
EXPECT(assert_equal((Vector)k3Coefficients, actual.parameters(), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Check derivative in two different ways: numerical and using D on f
|
||||
double proxy(double x) {
|
||||
return FourierBasis::EvaluationFunctor(7, x)(k7Coefficients);
|
||||
}
|
||||
|
||||
TEST(Basis, Derivative7) {
|
||||
// Check Derivative evaluation at point x=0.2
|
||||
|
||||
// Calculate expected values by numerical derivative of proxy.
|
||||
const double x = 0.2;
|
||||
Matrix numeric_dTdx = numericalDerivative11<double, double>(proxy, x);
|
||||
|
||||
// Calculate derivatives at Chebyshev points using D7, interpolate
|
||||
Matrix D7 = FourierBasis::DifferentiationMatrix(7);
|
||||
Vector derivativeCoefficients = D7 * k7Coefficients;
|
||||
FourierBasis::EvaluationFunctor fx(7, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), fx(derivativeCoefficients), 1e-8);
|
||||
|
||||
// Do directly
|
||||
FourierBasis::DerivativeFunctor dfdx(7, x);
|
||||
EXPECT_DOUBLES_EQUAL(numeric_dTdx(0, 0), dfdx(k7Coefficients), 1e-8);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Basis, VecDerivativeFunctor) {
|
||||
using DotShape = typename FourierBasis::VectorDerivativeFunctor<2>;
|
||||
const size_t N = 3;
|
||||
|
||||
// MATLAB example, Dec 27 2019, commit 014eded5
|
||||
double h = 2 * M_PI / 16;
|
||||
Vector2 dotShape(0.5556, -0.8315); // at h/2
|
||||
DotShape dotShapeFunction(N, h / 2);
|
||||
Matrix23 theta_mat = (Matrix32() << 0, 0, 0.7071, 0.7071, 0.7071, -0.7071)
|
||||
.finished()
|
||||
.transpose();
|
||||
ParameterMatrix<2> theta(theta_mat);
|
||||
EXPECT(assert_equal(Vector(dotShape), dotShapeFunction(theta), 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Suppose we want to parameterize a periodic function with function values at
|
||||
// specific times, rather than coefficients. Can we do it? This would be a
|
||||
// generalization of the Fourier transform, and constitute a "pseudo-spectral"
|
||||
// parameterization. One way to do this is to establish hard constraints that
|
||||
// express the relationship between the new parameters and the coefficients.
|
||||
// For example, below I'd like the parameters to be the function values at
|
||||
// X = {0.1,0.2,0.3}, rather than a 3-vector of coefficients.
|
||||
// Because the values f(X) at these points can be written as f(X) = W(X)*c,
|
||||
// we can simply express the coefficients c as c=inv(W(X))*f, which is a
|
||||
// generalized Fourier transform. That also means we can create factors with the
|
||||
// unknown f-values, as done manually below.
|
||||
TEST(Basis, PseudoSpectral) {
|
||||
// We will create a linear factor graph
|
||||
GaussianFactorGraph graph;
|
||||
|
||||
const size_t N = 3;
|
||||
const Key key(1);
|
||||
|
||||
// The correct values at X = {0.1,0.2,0.3} are simply W*c
|
||||
const Vector X = (Vector3() << 0.1, 0.2, 0.3).finished();
|
||||
const Matrix W = FourierBasis::WeightMatrix(N, X);
|
||||
const Vector expected = W * k3Coefficients;
|
||||
|
||||
// Check those values are indeed correct values of Fourier approximation
|
||||
using Eval = FourierBasis::EvaluationFunctor;
|
||||
EXPECT_DOUBLES_EQUAL(Eval(N, 0.1)(k3Coefficients), expected(0), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(Eval(N, 0.2)(k3Coefficients), expected(1), 1e-9);
|
||||
EXPECT_DOUBLES_EQUAL(Eval(N, 0.3)(k3Coefficients), expected(2), 1e-9);
|
||||
|
||||
// Calculate "inverse Fourier transform" matrix
|
||||
const Matrix invW = W.inverse();
|
||||
|
||||
  // At 16 different sample points x, add a factor on fExpr
|
||||
for (size_t i = 0; i < 16; i++) {
|
||||
const double x = i * M_PI / 8;
|
||||
const double desiredValue = TestFunction(x);
|
||||
|
||||
// Manual JacobianFactor
|
||||
Matrix A(1, 3);
|
||||
A << 1, cos(x), sin(x);
|
||||
Vector b(1);
|
||||
b << desiredValue;
|
||||
JacobianFactor linearFactor(key, A * invW, b);
|
||||
graph.add(linearFactor);
|
||||
}
|
||||
|
||||
// Solve linear graph
|
||||
VectorValues actual = graph.optimize();
|
||||
EXPECT(assert_equal((Vector)expected, actual.at(key), 1e-4));
|
||||
}
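
In matrix form, the change of variables exercised above is simply f = W(X)*c, hence c = inv(W(X))*f, and any linear measurement row A*c can be rewritten as (A * inv(W(X)))*f. That is why each manual JacobianFactor is built with A * invW instead of A, and why the optimum in the f-parameterization equals W * k3Coefficients.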
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@ -0,0 +1,145 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testParameterMatrix.cpp
|
||||
* @date Sep 22, 2020
|
||||
* @author Varun Agrawal, Frank Dellaert
|
||||
* @brief Unit tests for ParameterMatrix.h
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
using Parameters = Chebyshev2::Parameters;
|
||||
|
||||
const size_t M = 2, N = 5;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Constructor) {
|
||||
ParameterMatrix<2> actual1(3);
|
||||
ParameterMatrix<2> expected1(Matrix::Zero(2, 3));
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
|
||||
ParameterMatrix<2> actual2(Matrix::Ones(2, 3));
|
||||
ParameterMatrix<2> expected2(Matrix::Ones(2, 3));
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
EXPECT(assert_equal(expected2.matrix(), actual2.matrix()));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Dimensions) {
|
||||
ParameterMatrix<M> params(N);
|
||||
EXPECT_LONGS_EQUAL(params.rows(), M);
|
||||
EXPECT_LONGS_EQUAL(params.cols(), N);
|
||||
EXPECT_LONGS_EQUAL(params.dim(), M * N);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Getters) {
|
||||
ParameterMatrix<M> params(N);
|
||||
|
||||
Matrix expectedMatrix = Matrix::Zero(2, 5);
|
||||
EXPECT(assert_equal(expectedMatrix, params.matrix()));
|
||||
|
||||
Matrix expectedMatrixTranspose = Matrix::Zero(5, 2);
|
||||
EXPECT(assert_equal(expectedMatrixTranspose, params.transpose()));
|
||||
|
||||
ParameterMatrix<M> p2(Matrix::Ones(M, N));
|
||||
Vector expectedRowVector = Vector::Ones(N);
|
||||
for (size_t r = 0; r < M; ++r) {
|
||||
EXPECT(assert_equal(p2.row(r), expectedRowVector));
|
||||
}
|
||||
|
||||
ParameterMatrix<M> p3(2 * Matrix::Ones(M, N));
|
||||
Vector expectedColVector = 2 * Vector::Ones(M);
|
||||
for (size_t c = 0; c < M; ++c) {
|
||||
EXPECT(assert_equal(p3.col(c), expectedColVector));
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Setters) {
|
||||
ParameterMatrix<M> params(Matrix::Zero(M, N));
|
||||
Matrix expected = Matrix::Zero(M, N);
|
||||
|
||||
// row
|
||||
params.row(0) = Vector::Ones(N);
|
||||
expected.row(0) = Vector::Ones(N);
|
||||
EXPECT(assert_equal(expected, params.matrix()));
|
||||
|
||||
// col
|
||||
params.col(2) = Vector::Ones(M);
|
||||
expected.col(2) = Vector::Ones(M);
|
||||
|
||||
EXPECT(assert_equal(expected, params.matrix()));
|
||||
|
||||
// setZero
|
||||
params.setZero();
|
||||
expected.setZero();
|
||||
EXPECT(assert_equal(expected, params.matrix()));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Addition) {
|
||||
ParameterMatrix<M> params(Matrix::Ones(M, N));
|
||||
ParameterMatrix<M> expected(2 * Matrix::Ones(M, N));
|
||||
|
||||
// Add vector
|
||||
EXPECT(assert_equal(expected, params + Vector::Ones(M * N)));
|
||||
// Add another ParameterMatrix
|
||||
ParameterMatrix<M> actual = params + ParameterMatrix<M>(Matrix::Ones(M, N));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Subtraction) {
|
||||
ParameterMatrix<M> params(2 * Matrix::Ones(M, N));
|
||||
ParameterMatrix<M> expected(Matrix::Ones(M, N));
|
||||
|
||||
// Subtract vector
|
||||
EXPECT(assert_equal(expected, params - Vector::Ones(M * N)));
|
||||
// Subtract another ParameterMatrix
|
||||
ParameterMatrix<M> actual = params - ParameterMatrix<M>(Matrix::Ones(M, N));
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, Multiplication) {
|
||||
ParameterMatrix<M> params(Matrix::Ones(M, N));
|
||||
Matrix multiplier = 2 * Matrix::Ones(N, 2);
|
||||
Matrix expected = 2 * N * Matrix::Ones(M, 2);
|
||||
EXPECT(assert_equal(expected, params * multiplier));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(ParameterMatrix, VectorSpace) {
|
||||
ParameterMatrix<M> params(Matrix::Ones(M, N));
|
||||
// vector
|
||||
EXPECT(assert_equal(Vector::Ones(M * N), params.vector()));
|
||||
// identity
|
||||
EXPECT(assert_equal(ParameterMatrix<M>::identity(),
|
||||
ParameterMatrix<M>(Matrix::Zero(M, 0))));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
//******************************************************************************
|
|
@ -77,3 +77,9 @@
|
|||
|
||||
// Support Metis-based nested dissection
|
||||
#cmakedefine GTSAM_TANGENT_PREINTEGRATION
|
||||
|
||||
// Whether to use the system installed Metis instead of the provided one
|
||||
#cmakedefine GTSAM_USE_SYSTEM_METIS
|
||||
|
||||
// Toggle switch for BetweenFactor jacobian computation
|
||||
#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
|
||||
|
|
|
@ -147,51 +147,149 @@ public:
|
|||
* G = F' * F - F' * E * P * E' * F
|
||||
* g = F' * (b - E * P * E' * b)
|
||||
* Fixed size version
|
||||
*/
|
||||
template<int N, int ND> // N = 2 or 3, ND is the camera dimension
|
||||
static SymmetricBlockMatrix SchurComplement(
|
||||
const std::vector< Eigen::Matrix<double, ZDim, ND>, Eigen::aligned_allocator< Eigen::Matrix<double, ZDim, ND> > >& Fs,
|
||||
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b) {
|
||||
*/
|
||||
template <int N,
|
||||
int ND> // N = 2 or 3 (point dimension), ND is the camera dimension
|
||||
static SymmetricBlockMatrix SchurComplement(
|
||||
const std::vector<
|
||||
Eigen::Matrix<double, ZDim, ND>,
|
||||
Eigen::aligned_allocator<Eigen::Matrix<double, ZDim, ND>>>& Fs,
|
||||
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b) {
|
||||
// a single point is observed in m cameras
|
||||
size_t m = Fs.size();
|
||||
|
||||
// a single point is observed in m cameras
|
||||
size_t m = Fs.size();
|
||||
// Create a SymmetricBlockMatrix (augmented hessian, with extra row/column with info vector)
|
||||
size_t M1 = ND * m + 1;
|
||||
std::vector<DenseIndex> dims(m + 1); // this also includes the b term
|
||||
std::fill(dims.begin(), dims.end() - 1, ND);
|
||||
dims.back() = 1;
|
||||
SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(M1, M1));
|
||||
|
||||
// Create a SymmetricBlockMatrix (augmented hessian, with extra row/column with info vector)
|
||||
size_t M1 = ND * m + 1;
|
||||
std::vector<DenseIndex> dims(m + 1); // this also includes the b term
|
||||
std::fill(dims.begin(), dims.end() - 1, ND);
|
||||
dims.back() = 1;
|
||||
SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(M1, M1));
|
||||
// Blockwise Schur complement
|
||||
for (size_t i = 0; i < m; i++) { // for each camera
|
||||
|
||||
// Blockwise Schur complement
|
||||
for (size_t i = 0; i < m; i++) { // for each camera
|
||||
const Eigen::Matrix<double, ZDim, ND>& Fi = Fs[i];
|
||||
const auto FiT = Fi.transpose();
|
||||
const Eigen::Matrix<double, ZDim, N> Ei_P = //
|
||||
E.block(ZDim * i, 0, ZDim, N) * P;
|
||||
|
||||
const Eigen::Matrix<double, ZDim, ND>& Fi = Fs[i];
|
||||
const auto FiT = Fi.transpose();
|
||||
const Eigen::Matrix<double, ZDim, N> Ei_P = //
|
||||
E.block(ZDim * i, 0, ZDim, N) * P;
|
||||
// D = (Dx2) * ZDim
|
||||
augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
|
||||
// D = (Dx2) * ZDim
|
||||
augmentedHessian.setOffDiagonalBlock(i, m, FiT * b.segment<ZDim>(ZDim * i) // F' * b
|
||||
- FiT * (Ei_P * (E.transpose() * b))); // D = (DxZDim) * (ZDimx3) * (N*ZDimm) * (ZDimm x 1)
|
||||
// (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
augmentedHessian.setDiagonalBlock(i, FiT
|
||||
* (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi));
|
||||
|
||||
// (DxD) = (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
augmentedHessian.setDiagonalBlock(i, FiT
|
||||
* (Fi - Ei_P * E.block(ZDim * i, 0, ZDim, N).transpose() * Fi));
|
||||
// upper triangular part of the hessian
|
||||
for (size_t j = i + 1; j < m; j++) { // for each camera
|
||||
const Eigen::Matrix<double, ZDim, ND>& Fj = Fs[j];
|
||||
|
||||
// upper triangular part of the hessian
|
||||
for (size_t j = i + 1; j < m; j++) { // for each camera
|
||||
const Eigen::Matrix<double, ZDim, ND>& Fj = Fs[j];
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
augmentedHessian.setOffDiagonalBlock(i, j, -FiT
|
||||
* (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj));
|
||||
}
|
||||
} // end of for over cameras
|
||||
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
augmentedHessian.setOffDiagonalBlock(i, j, -FiT
|
||||
* (Ei_P * E.block(ZDim * j, 0, ZDim, N).transpose() * Fj));
|
||||
augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm();
|
||||
return augmentedHessian;
|
||||
}
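
Written out once, the quantity assembled blockwise above is the Schur complement of the point block (the same formula as in the docstring):

  G = F' * F - F' * E * P * E' * F
  g = F' * (b - E * P * E' * b)

with the squared norm of b added as the last diagonal entry; the camera-by-camera loop fills G and g one block at a time, so the full stacked F is never formed explicitly.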
|
||||
|
||||
/**
|
||||
* Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix
|
||||
* G = F' * F - F' * E * P * E' * F
|
||||
* g = F' * (b - E * P * E' * b)
|
||||
* In this version, we allow for the case where the keys in the Jacobian are
|
||||
   * organized differently from the keys in the output SymmetricBlockMatrix. In
|
||||
* particular: each diagonal block of the Jacobian F captures 2 poses (useful
|
||||
* for rolling shutter and extrinsic calibration) such that F keeps the block
|
||||
* structure that makes the Schur complement trick fast.
|
||||
*
|
||||
* N = 2 or 3 (point dimension), ND is the Jacobian block dimension, NDD is
|
||||
* the Hessian block dimension
|
||||
*/
|
||||
template <int N, int ND, int NDD>
|
||||
static SymmetricBlockMatrix SchurComplementAndRearrangeBlocks(
|
||||
const std::vector<
|
||||
Eigen::Matrix<double, ZDim, ND>,
|
||||
Eigen::aligned_allocator<Eigen::Matrix<double, ZDim, ND>>>& Fs,
|
||||
const Matrix& E, const Eigen::Matrix<double, N, N>& P, const Vector& b,
|
||||
const KeyVector& jacobianKeys, const KeyVector& hessianKeys) {
|
||||
size_t nrNonuniqueKeys = jacobianKeys.size();
|
||||
size_t nrUniqueKeys = hessianKeys.size();
|
||||
|
||||
// Marginalize point: note - we reuse the standard SchurComplement function.
|
||||
SymmetricBlockMatrix augmentedHessian = SchurComplement<N, ND>(Fs, E, P, b);
|
||||
|
||||
    // Pack into a Hessian factor, allowing space for the b term.
|
||||
std::vector<DenseIndex> dims(nrUniqueKeys + 1);
|
||||
std::fill(dims.begin(), dims.end() - 1, NDD);
|
||||
dims.back() = 1;
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys;
|
||||
|
||||
// Deal with the fact that some blocks may share the same keys.
|
||||
if (nrUniqueKeys == nrNonuniqueKeys) {
|
||||
// Case when there is 1 calibration key per camera:
|
||||
augmentedHessianUniqueKeys = SymmetricBlockMatrix(
|
||||
dims, Matrix(augmentedHessian.selfadjointView()));
|
||||
} else {
|
||||
// When multiple cameras share a calibration we have to rearrange
|
||||
// the results of the Schur complement matrix.
|
||||
std::vector<DenseIndex> nonuniqueDims(nrNonuniqueKeys + 1); // includes b
|
||||
std::fill(nonuniqueDims.begin(), nonuniqueDims.end() - 1, NDD);
|
||||
nonuniqueDims.back() = 1;
|
||||
augmentedHessian = SymmetricBlockMatrix(
|
||||
nonuniqueDims, Matrix(augmentedHessian.selfadjointView()));
|
||||
|
||||
// Get map from key to location in the new augmented Hessian matrix (the
|
||||
// one including only unique keys).
|
||||
std::map<Key, size_t> keyToSlotMap;
|
||||
for (size_t k = 0; k < nrUniqueKeys; k++) {
|
||||
keyToSlotMap[hessianKeys[k]] = k;
|
||||
}
|
||||
|
||||
// Initialize matrix to zero.
|
||||
augmentedHessianUniqueKeys = SymmetricBlockMatrix(
|
||||
dims, Matrix::Zero(NDD * nrUniqueKeys + 1, NDD * nrUniqueKeys + 1));
|
||||
|
||||
// Add contributions for each key: note this loops over the hessian with
|
||||
// nonUnique keys (augmentedHessian) and populates an Hessian that only
|
||||
// includes the unique keys (that is what we want to return).
|
||||
for (size_t i = 0; i < nrNonuniqueKeys; i++) { // rows
|
||||
Key key_i = jacobianKeys.at(i);
|
||||
|
||||
// Update information vector.
|
||||
augmentedHessianUniqueKeys.updateOffDiagonalBlock(
|
||||
keyToSlotMap[key_i], nrUniqueKeys,
|
||||
augmentedHessian.aboveDiagonalBlock(i, nrNonuniqueKeys));
|
||||
|
||||
// Update blocks.
|
||||
for (size_t j = i; j < nrNonuniqueKeys; j++) { // cols
|
||||
Key key_j = jacobianKeys.at(j);
|
||||
if (i == j) {
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
keyToSlotMap[key_i], augmentedHessian.diagonalBlock(i));
|
||||
} else { // (i < j)
|
||||
if (keyToSlotMap[key_i] != keyToSlotMap[key_j]) {
|
||||
augmentedHessianUniqueKeys.updateOffDiagonalBlock(
|
||||
keyToSlotMap[key_i], keyToSlotMap[key_j],
|
||||
augmentedHessian.aboveDiagonalBlock(i, j));
|
||||
} else {
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
keyToSlotMap[key_i],
|
||||
augmentedHessian.aboveDiagonalBlock(i, j) +
|
||||
augmentedHessian.aboveDiagonalBlock(i, j).transpose());
|
||||
}
|
||||
}
|
||||
}
|
||||
} // end of for over cameras
|
||||
}
|
||||
|
||||
augmentedHessian.diagonalBlock(m)(0, 0) += b.squaredNorm();
|
||||
return augmentedHessian;
|
||||
// Update bottom right element of the matrix.
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
nrUniqueKeys, augmentedHessian.diagonalBlock(nrNonuniqueKeys));
|
||||
}
|
||||
return augmentedHessianUniqueKeys;
|
||||
}
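For intuition, the Schur complement that all of this block bookkeeping computes can also be written densely with plain Eigen. The sketch below is illustrative only (it is not GTSAM code, and the sizes m = 3 cameras, ZDim = 2, N = 3, ND = 12 are assumptions); the block implementation above produces the same G and g without ever forming the dense stacked Jacobians:

#include <Eigen/Dense>
#include <iostream>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  const int m = 3, ZDim = 2, N = 3, ND = 12;   // illustrative sizes only

  // Stacked measurement Jacobians: F w.r.t. the cameras, E w.r.t. the point.
  MatrixXd F = MatrixXd::Random(ZDim * m, ND * m);
  MatrixXd E = MatrixXd::Random(ZDim * m, N);
  VectorXd b = VectorXd::Random(ZDim * m);

  // Point covariance P = (E'E)^-1 (no damping).
  MatrixXd P = (E.transpose() * E).inverse();

  // Schur complement: G = F'F - F'E P E'F,  g = F'(b - E P E'b).
  MatrixXd G = F.transpose() * F - F.transpose() * E * P * E.transpose() * F;
  VectorXd g = F.transpose() * (b - E * P * E.transpose() * b);

  std::cout << "G is " << G.rows() << "x" << G.cols()
            << ", g has " << g.size() << " entries\n";
  return 0;
}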
/**
* Do Schur complement, given Jacobian as Fs,E,P, return SymmetricBlockMatrix

@ -206,7 +304,7 @@ public:
}

/// Computes Point Covariance P, with lambda parameter
template<int N> // N = 2 or 3
template<int N> // N = 2 or 3 (point dimension)
static void ComputePointCovariance(Eigen::Matrix<double, N, N>& P,
const Matrix& E, double lambda, bool diagonalDamping = false) {

@ -258,7 +356,7 @@ public:
* Applies Schur complement (exploiting block structure) to get a smart factor on cameras,
* and adds the contribution of the smart factor to a pre-allocated augmented Hessian.
*/
template<int N> // N = 2 or 3
template<int N> // N = 2 or 3 (point dimension)
static void UpdateSchurComplement(const FBlocks& Fs, const Matrix& E,
const Eigen::Matrix<double, N, N>& P, const Vector& b,
const KeyVector& allKeys, const KeyVector& keys,
@ -30,7 +30,7 @@ namespace gtsam {
* \nosubgrouping
*/
template<typename Calibration>
class PinholeCamera: public PinholeBaseK<Calibration> {
class GTSAM_EXPORT PinholeCamera: public PinholeBaseK<Calibration> {

public:

@ -31,7 +31,7 @@ namespace gtsam {
* \nosubgrouping
*/
template<typename CALIBRATION>
class PinholeBaseK: public PinholeBase {
class GTSAM_EXPORT PinholeBaseK: public PinholeBase {

private:

@ -25,6 +25,12 @@ namespace gtsam {
/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point2 to Vector2
typedef Vector2 Point2;

// Convenience typedef
using Point2Pair = std::pair<Point2, Point2>;
GTSAM_EXPORT std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);

using Point2Pairs = std::vector<Point2Pair>;

/// Distance of the point from the origin, with Jacobian
GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none);

@ -34,10 +40,6 @@ GTSAM_EXPORT double distance2(const Point2& p1, const Point2& q,
OptionalJacobian<1, 2> H1 = boost::none,
OptionalJacobian<1, 2> H2 = boost::none);

// Convenience typedef
typedef std::pair<Point2, Point2> Point2Pair;
GTSAM_EXPORT std::ostream &operator<<(std::ostream &os, const gtsam::Point2Pair &p);

// For MATLAB wrapper
typedef std::vector<Point2, Eigen::aligned_allocator<Point2> > Point2Vector;
@ -261,25 +261,59 @@ Vector3 SO3::Logmap(const SO3& Q, ChartJacobian H) {

// when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc.
// we do something special
if (tr + 1.0 < 1e-10) {
if (std::abs(R33 + 1.0) > 1e-5)
omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Vector3(R13, R23, 1.0 + R33);
else if (std::abs(R22 + 1.0) > 1e-5)
omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Vector3(R12, 1.0 + R22, R32);
else
// if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit
omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Vector3(1.0 + R11, R21, R31);
if (tr + 1.0 < 1e-3) {
if (R33 > R22 && R33 > R11) {
// R33 is the largest diagonal, a=3, b=1, c=2
const double W = R21 - R12;
const double Q1 = 2.0 + 2.0 * R33;
const double Q2 = R31 + R13;
const double Q3 = R23 + R32;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q2, Q3, Q1);
} else if (R22 > R11) {
// R22 is the largest diagonal, a=2, b=3, c=1
const double W = R13 - R31;
const double Q1 = 2.0 + 2.0 * R22;
const double Q2 = R23 + R32;
const double Q3 = R12 + R21;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q3, Q1, Q2);
} else {
// R11 is the largest diagonal, a=1, b=2, c=3
const double W = R32 - R23;
const double Q1 = 2.0 + 2.0 * R11;
const double Q2 = R12 + R21;
const double Q3 = R31 + R13;
const double r = sqrt(Q1);
const double one_over_r = 1 / r;
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
const double sgn_w = W < 0 ? -1.0 : 1.0;
const double mag = M_PI - (2 * sgn_w * W) / norm;
const double scale = 0.5 * one_over_r * mag;
omega = sgn_w * scale * Vector3(Q1, Q2, Q3);
}
} else {
double magnitude;
const double tr_3 = tr - 3.0; // always negative
if (tr_3 < -1e-7) {
const double tr_3 = tr - 3.0; // could be non-negative if the matrix is off orthogonal
if (tr_3 < -1e-6) {
// this is the normal case -1 < trace < 3
double theta = acos((tr - 1.0) / 2.0);
magnitude = theta / (2.0 * sin(theta));
} else {
// when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
// use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
// see https://github.com/borglab/gtsam/issues/746 for details
magnitude = 0.5 - tr_3 / 12.0;
magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0;
}
omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12);
}
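As a quick check of the added quadratic term (a derivation sketch, not part of the diff): with u = tr(R) - 3 and tr(R) = 1 + 2 cos(theta),

\frac{\theta}{2\sin\theta} = \frac{1}{2} + \frac{\theta^2}{12} + \frac{7\theta^4}{720} + O(\theta^6),
\qquad u = 2(\cos\theta - 1) = -\theta^2 + \frac{\theta^4}{12} + O(\theta^6),

so that \theta^2 = -u + \frac{u^2}{12} + O(u^3), and substituting,

\frac{\theta}{2\sin\theta} = \frac{1}{2} - \frac{u}{12} + \Big(\frac{1}{144} + \frac{7}{720}\Big)u^2 + O(u^3)
= \frac{1}{2} - \frac{u}{12} + \frac{u^2}{60} + O(u^3),

which is exactly the new line magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0.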

@ -31,6 +31,14 @@ class Point2 {
// enable pickling in python
void pickle() const;
};

class Point2Pairs {
Point2Pairs();
size_t size() const;
bool empty() const;
gtsam::Point2Pair at(size_t n) const;
void push_back(const gtsam::Point2Pair& point_pair);
};

// std::vector<gtsam::Point2>
class Point2Vector {

@ -429,6 +437,8 @@ class Pose2 {
// enable pickling in python
void pickle() const;
};

boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);

#include <gtsam/geometry/Pose3.h>
class Pose3 {
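With Point2Pairs wrapped and the align overload exposed, 2D point-set alignment becomes usable from the wrappers as well. A minimal C++ usage sketch follows; the correspondences are made up, align is assumed to be declared alongside Pose2 (as the wrapper line above suggests), and the direction convention (which element of each pair is mapped onto the other) should be checked against the header documentation:

#include <gtsam/geometry/Pose2.h>
#include <boost/optional.hpp>
#include <iostream>

int main() {
  using namespace gtsam;

  // Three 2D point correspondences (p, q), here related by a pure translation.
  Point2Pairs pairs;
  pairs.emplace_back(Point2(0, 0), Point2(1, 2));
  pairs.emplace_back(Point2(1, 0), Point2(2, 2));
  pairs.emplace_back(Point2(0, 1), Point2(1, 3));

  // align() returns boost::none if the problem is degenerate.
  boost::optional<Pose2> T = align(pairs);
  if (T) T->print("aligned pose ");
  return 0;
}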
@ -17,6 +17,7 @@
|
|||
*/
|
||||
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
@ -125,6 +126,89 @@ TEST(CameraSet, Pinhole) {
|
|||
EXPECT(assert_equal(actualE, E));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(CameraSet, SchurComplementAndRearrangeBlocks) {
|
||||
typedef PinholePose<Cal3Bundler> Camera;
|
||||
typedef CameraSet<Camera> Set;
|
||||
|
||||
// this is the (block) Jacobian with respect to the nonuniqueKeys
|
||||
std::vector<Eigen::Matrix<double, 2, 12>,
|
||||
Eigen::aligned_allocator<Eigen::Matrix<double, 2, 12> > > Fs;
|
||||
Fs.push_back(1 * Matrix::Ones(2, 12)); // corresponding to key pair (0,1)
|
||||
Fs.push_back(2 * Matrix::Ones(2, 12)); // corresponding to key pair (1,2)
|
||||
Fs.push_back(3 * Matrix::Ones(2, 12)); // corresponding to key pair (2,0)
|
||||
Matrix E = 4 * Matrix::Identity(6, 3) + Matrix::Ones(6, 3);
|
||||
E(0, 0) = 3;
|
||||
E(1, 1) = 2;
|
||||
E(2, 2) = 5;
|
||||
Matrix Et = E.transpose();
|
||||
Matrix P = (Et * E).inverse();
|
||||
Vector b = 5 * Vector::Ones(6);
|
||||
|
||||
{ // SchurComplement
|
||||
// Actual
|
||||
SymmetricBlockMatrix augmentedHessianBM = Set::SchurComplement<3, 12>(Fs, E,
|
||||
P, b);
|
||||
Matrix actualAugmentedHessian = augmentedHessianBM.selfadjointView();
|
||||
|
||||
// Expected
|
||||
Matrix F = Matrix::Zero(6, 3 * 12);
|
||||
F.block<2, 12>(0, 0) = 1 * Matrix::Ones(2, 12);
|
||||
F.block<2, 12>(2, 12) = 2 * Matrix::Ones(2, 12);
|
||||
F.block<2, 12>(4, 24) = 3 * Matrix::Ones(2, 12);
|
||||
|
||||
Matrix Ft = F.transpose();
|
||||
Matrix H = Ft * F - Ft * E * P * Et * F;
|
||||
Vector v = Ft * (b - E * P * Et * b);
|
||||
Matrix expectedAugmentedHessian = Matrix::Zero(3 * 12 + 1, 3 * 12 + 1);
|
||||
expectedAugmentedHessian.block<36, 36>(0, 0) = H;
|
||||
expectedAugmentedHessian.block<36, 1>(0, 36) = v;
|
||||
expectedAugmentedHessian.block<1, 36>(36, 0) = v.transpose();
|
||||
expectedAugmentedHessian(36, 36) = b.squaredNorm();
|
||||
|
||||
EXPECT(assert_equal(expectedAugmentedHessian, actualAugmentedHessian));
|
||||
}
|
||||
|
||||
{ // SchurComplementAndRearrangeBlocks
|
||||
KeyVector nonuniqueKeys;
|
||||
nonuniqueKeys.push_back(0);
|
||||
nonuniqueKeys.push_back(1);
|
||||
nonuniqueKeys.push_back(1);
|
||||
nonuniqueKeys.push_back(2);
|
||||
nonuniqueKeys.push_back(2);
|
||||
nonuniqueKeys.push_back(0);
|
||||
|
||||
KeyVector uniqueKeys;
|
||||
uniqueKeys.push_back(0);
|
||||
uniqueKeys.push_back(1);
|
||||
uniqueKeys.push_back(2);
|
||||
|
||||
// Actual
|
||||
SymmetricBlockMatrix augmentedHessianBM =
|
||||
Set::SchurComplementAndRearrangeBlocks<3, 12, 6>(
|
||||
Fs, E, P, b, nonuniqueKeys, uniqueKeys);
|
||||
Matrix actualAugmentedHessian = augmentedHessianBM.selfadjointView();
|
||||
|
||||
// Expected
|
||||
// we first need to build the Jacobian F according to unique keys
|
||||
Matrix F = Matrix::Zero(6, 18);
|
||||
F.block<2, 6>(0, 0) = Fs[0].block<2, 6>(0, 0);
|
||||
F.block<2, 6>(0, 6) = Fs[0].block<2, 6>(0, 6);
|
||||
F.block<2, 6>(2, 6) = Fs[1].block<2, 6>(0, 0);
|
||||
F.block<2, 6>(2, 12) = Fs[1].block<2, 6>(0, 6);
|
||||
F.block<2, 6>(4, 12) = Fs[2].block<2, 6>(0, 0);
|
||||
F.block<2, 6>(4, 0) = Fs[2].block<2, 6>(0, 6);
|
||||
|
||||
Matrix Ft = F.transpose();
|
||||
Vector v = Ft * (b - E * P * Et * b);
|
||||
Matrix H = Ft * F - Ft * E * P * Et * F;
|
||||
Matrix expectedAugmentedHessian(19, 19);
|
||||
expectedAugmentedHessian << H, v, v.transpose(), b.squaredNorm();
|
||||
|
||||
EXPECT(assert_equal(expectedAugmentedHessian, actualAugmentedHessian));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
#include <gtsam/geometry/StereoCamera.h>
|
||||
TEST(CameraSet, Stereo) {
|
||||
|
|
|
@ -1046,6 +1046,68 @@ TEST(Pose3, interpolate) {
|
|||
EXPECT(assert_equal(expected2, T2.interpolateRt(T3, t)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Pose3 testing_interpolate(const Pose3& t1, const Pose3& t2, double gamma) { return interpolate(t1,t2,gamma); }
|
||||
|
||||
TEST(Pose3, interpolateJacobians) {
|
||||
{
|
||||
Pose3 X = Pose3::identity();
|
||||
Pose3 Y(Rot3::Rz(M_PI_2), Point3(1, 0, 0));
|
||||
double t = 0.5;
|
||||
Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0.5, -0.207107, 0)); // note: different from test above: this is full Pose3 interpolation
|
||||
Matrix actualJacobianX, actualJacobianY;
|
||||
EXPECT(assert_equal(expectedPoseInterp, interpolate(X, Y, t, actualJacobianX, actualJacobianY), 1e-5));
|
||||
|
||||
Matrix expectedJacobianX = numericalDerivative31<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianX,actualJacobianX,1e-6));
|
||||
|
||||
Matrix expectedJacobianY = numericalDerivative32<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
|
||||
}
|
||||
{
|
||||
Pose3 X = Pose3::identity();
|
||||
Pose3 Y(Rot3::identity(), Point3(1, 0, 0));
|
||||
double t = 0.3;
|
||||
Pose3 expectedPoseInterp(Rot3::identity(), Point3(0.3, 0, 0));
|
||||
Matrix actualJacobianX, actualJacobianY;
|
||||
EXPECT(assert_equal(expectedPoseInterp, interpolate(X, Y, t, actualJacobianX, actualJacobianY), 1e-5));
|
||||
|
||||
Matrix expectedJacobianX = numericalDerivative31<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianX,actualJacobianX,1e-6));
|
||||
|
||||
Matrix expectedJacobianY = numericalDerivative32<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
|
||||
}
|
||||
{
|
||||
Pose3 X = Pose3::identity();
|
||||
Pose3 Y(Rot3::Rz(M_PI_2), Point3(0, 0, 0));
|
||||
double t = 0.5;
|
||||
Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0, 0, 0));
|
||||
Matrix actualJacobianX, actualJacobianY;
|
||||
EXPECT(assert_equal(expectedPoseInterp, interpolate(X, Y, t, actualJacobianX, actualJacobianY), 1e-5));
|
||||
|
||||
Matrix expectedJacobianX = numericalDerivative31<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianX,actualJacobianX,1e-6));
|
||||
|
||||
Matrix expectedJacobianY = numericalDerivative32<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
|
||||
}
|
||||
{
|
||||
Pose3 X(Rot3::Ypr(0.1,0.2,0.3), Point3(10, 5, -2));
|
||||
Pose3 Y(Rot3::Ypr(1.1,-2.2,-0.3), Point3(-5, 1, 1));
|
||||
double t = 0.3;
|
||||
Pose3 expectedPoseInterp(Rot3::Rz(M_PI_4), Point3(0, 0, 0));
|
||||
Matrix actualJacobianX, actualJacobianY;
|
||||
interpolate(X, Y, t, actualJacobianX, actualJacobianY);
|
||||
|
||||
Matrix expectedJacobianX = numericalDerivative31<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianX,actualJacobianX,1e-6));
|
||||
|
||||
Matrix expectedJacobianY = numericalDerivative32<Pose3,Pose3,Pose3,double>(testing_interpolate, X, Y, t);
|
||||
EXPECT(assert_equal(expectedJacobianY,actualJacobianY,1e-6));
|
||||
}
|
||||
}
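For reference when reading these Jacobian tests: for a Lie group element, gtsam::interpolate is (as far as I can tell) the geodesic interpolation

\mathrm{interpolate}(X, Y, t) = X \,\exp\!\big(t \,\log(X^{-1} Y)\big),

so interpolate(X, Y, 0) = X and interpolate(X, Y, 1) = Y. For the first case above (identity to (Rz(pi/2), (1,0,0)) at t = 0.5) this is a screw motion whose midpoint translation is (0.5, -0.207107, 0), matching expectedPoseInterp; the numericalDerivative31/32 calls then check the analytic Jacobians of this composite map with respect to X and Y.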
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Pose3, Create) {
|
||||
Matrix63 actualH1, actualH2;
|
||||
|
|
|
@ -122,6 +122,21 @@ TEST( Rot3, AxisAngle)
|
|||
CHECK(assert_equal(expected,actual3,1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, AxisAngle2)
|
||||
{
|
||||
// constructor from a rotation matrix, as doubles in *row-major* order.
|
||||
Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781);
|
||||
|
||||
Unit3 actualAxis;
|
||||
double actualAngle;
|
||||
// convert Rot3 to quaternion using GTSAM
|
||||
std::tie(actualAxis, actualAngle) = R1.axisAngle();
|
||||
|
||||
double expectedAngle = 3.1396582;
|
||||
CHECK(assert_equal(expectedAngle, actualAngle, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, Rodrigues)
|
||||
{
|
||||
|
@ -181,13 +196,13 @@ TEST( Rot3, retract)
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Rot3, log) {
|
||||
TEST( Rot3, log) {
|
||||
static const double PI = boost::math::constants::pi<double>();
|
||||
Vector w;
|
||||
Rot3 R;
|
||||
|
||||
#define CHECK_OMEGA(X, Y, Z) \
|
||||
w = (Vector(3) << X, Y, Z).finished(); \
|
||||
w = (Vector(3) << (X), (Y), (Z)).finished(); \
|
||||
R = Rot3::Rodrigues(w); \
|
||||
EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12));
|
||||
|
||||
|
@ -219,17 +234,17 @@ TEST(Rot3, log) {
|
|||
CHECK_OMEGA(0, 0, PI)
|
||||
|
||||
// Windows and Linux have flipped sign in quaternion mode
|
||||
#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
|
||||
//#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
|
||||
w = (Vector(3) << x * PI, y * PI, z * PI).finished();
|
||||
R = Rot3::Rodrigues(w);
|
||||
EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12));
|
||||
#else
|
||||
CHECK_OMEGA(x * PI, y * PI, z * PI)
|
||||
#endif
|
||||
//#else
|
||||
// CHECK_OMEGA(x * PI, y * PI, z * PI)
|
||||
//#endif
|
||||
|
||||
// Check 360 degree rotations
|
||||
#define CHECK_OMEGA_ZERO(X, Y, Z) \
|
||||
w = (Vector(3) << X, Y, Z).finished(); \
|
||||
w = (Vector(3) << (X), (Y), (Z)).finished(); \
|
||||
R = Rot3::Rodrigues(w); \
|
||||
EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R)));
|
||||
|
||||
|
@ -247,15 +262,15 @@ TEST(Rot3, log) {
|
|||
// Rot3's Logmap returns different, but equivalent compacted
|
||||
// axis-angle vectors depending on whether Rot3 is implemented
|
||||
// by Quaternions or SO3.
|
||||
#if defined(GTSAM_USE_QUATERNIONS)
|
||||
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
|
||||
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
|
||||
#if defined(GTSAM_USE_QUATERNIONS)
|
||||
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
|
||||
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#else
|
||||
// SO3 will be approximate because of the non-orthogonality
|
||||
EXPECT(assert_equal(Vector3(0.264452, -0.742197708, -3.04098184),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#else
|
||||
// SO3 does not bound angle resulting in ~180.1 degrees
|
||||
EXPECT(assert_equal(Vector3(-0.264544406, 0.742217405, 3.04117314),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/*
|
||||
* @file testUtilities.cpp
|
||||
* @date Aug 19, 2021
|
||||
* @author Varun Agrawal
|
||||
* @brief Tests for the utilities.
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/utilities.h>
|
||||
|
||||
using namespace gtsam;
|
||||
using gtsam::symbol_shorthand::L;
|
||||
using gtsam::symbol_shorthand::R;
|
||||
using gtsam::symbol_shorthand::X;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Utilities, ExtractPoint2) {
|
||||
Point2 p0(0, 0), p1(1, 0);
|
||||
Values values;
|
||||
values.insert<Point2>(L(0), p0);
|
||||
values.insert<Point2>(L(1), p1);
|
||||
values.insert<Rot3>(R(0), Rot3());
|
||||
values.insert<Pose3>(X(0), Pose3());
|
||||
|
||||
Matrix all_points = utilities::extractPoint2(values);
|
||||
EXPECT_LONGS_EQUAL(2, all_points.rows());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Utilities, ExtractPoint3) {
|
||||
Point3 p0(0, 0, 0), p1(1, 0, 0);
|
||||
Values values;
|
||||
values.insert<Point3>(L(0), p0);
|
||||
values.insert<Point3>(L(1), p1);
|
||||
values.insert<Rot3>(R(0), Rot3());
|
||||
values.insert<Pose3>(X(0), Pose3());
|
||||
|
||||
Matrix all_points = utilities::extractPoint3(values);
|
||||
EXPECT_LONGS_EQUAL(2, all_points.rows());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
srand(time(nullptr));
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -165,22 +165,17 @@ gtsam::Values allPose2s(gtsam::Values& values);
|
|||
Matrix extractPose2(const gtsam::Values& values);
|
||||
gtsam::Values allPose3s(gtsam::Values& values);
|
||||
Matrix extractPose3(const gtsam::Values& values);
|
||||
void perturbPoint2(gtsam::Values& values, double sigma, int seed);
|
||||
void perturbPoint2(gtsam::Values& values, double sigma, int seed = 42u);
|
||||
void perturbPose2(gtsam::Values& values, double sigmaT, double sigmaR,
|
||||
int seed);
|
||||
void perturbPoint3(gtsam::Values& values, double sigma, int seed);
|
||||
int seed = 42u);
|
||||
void perturbPoint3(gtsam::Values& values, double sigma, int seed = 42u);
|
||||
void insertBackprojections(gtsam::Values& values,
|
||||
const gtsam::PinholeCamera<gtsam::Cal3_S2>& c,
|
||||
Vector J, Matrix Z, double depth);
|
||||
void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i,
|
||||
Vector J, Matrix Z,
|
||||
const gtsam::noiseModel::Base* model,
|
||||
const gtsam::Cal3_S2* K);
|
||||
void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i,
|
||||
Vector J, Matrix Z,
|
||||
const gtsam::noiseModel::Base* model,
|
||||
const gtsam::Cal3_S2* K,
|
||||
const gtsam::Pose3& body_P_sensor);
|
||||
void insertProjectionFactors(
|
||||
gtsam::NonlinearFactorGraph& graph, size_t i, Vector J, Matrix Z,
|
||||
const gtsam::noiseModel::Base* model, const gtsam::Cal3_S2* K,
|
||||
const gtsam::Pose3& body_P_sensor = gtsam::Pose3());
|
||||
Matrix reprojectionErrors(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& values);
|
||||
gtsam::Values localToWorld(const gtsam::Values& local,
|
||||
|
|
|
@ -70,16 +70,23 @@ namespace gtsam {
|
|||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/** Default constructor */
|
||||
/// Default constructor
|
||||
BayesTreeCliqueBase() : problemSize_(1) {}
|
||||
|
||||
/** Construct from a conditional, leaving parent and child pointers uninitialized */
|
||||
BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {}
|
||||
/// Construct from a conditional, leaving parent and child pointers
|
||||
/// uninitialized.
|
||||
BayesTreeCliqueBase(const sharedConditional& conditional)
|
||||
: conditional_(conditional), problemSize_(1) {}
|
||||
|
||||
/** Shallow copy constructor */
|
||||
BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {}
|
||||
/// Shallow copy constructor.
|
||||
BayesTreeCliqueBase(const BayesTreeCliqueBase& c)
|
||||
: conditional_(c.conditional_),
|
||||
parent_(c.parent_),
|
||||
children(c.children),
|
||||
problemSize_(c.problemSize_),
|
||||
is_root(c.is_root) {}
|
||||
|
||||
/** Shallow copy assignment constructor */
|
||||
/// Shallow copy assignment constructor
|
||||
BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) {
|
||||
conditional_ = c.conditional_;
|
||||
parent_ = c.parent_;
|
||||
|
@ -89,6 +96,9 @@ namespace gtsam {
|
|||
return *this;
|
||||
}
|
||||
|
||||
// Virtual destructor.
|
||||
virtual ~BayesTreeCliqueBase() {}
|
||||
|
||||
/// @}
|
||||
|
||||
/// This stores the Cached separator marginal P(S)
|
||||
|
@ -119,7 +129,9 @@ namespace gtsam {
|
|||
bool equals(const DERIVED& other, double tol = 1e-9) const;
|
||||
|
||||
/** print this node */
|
||||
virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
virtual void print(
|
||||
const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
/// Typedef for a function to format a key, i.e. to convert it to a string
|
||||
typedef std::function<std::string(Key)> KeyFormatter;
|
||||
using KeyFormatter = std::function<std::string(Key)>;
|
||||
|
||||
// Helper function for DefaultKeyFormatter
|
||||
GTSAM_EXPORT std::string _defaultKeyFormatter(Key key);
|
||||
|
@ -83,28 +83,32 @@ class key_formatter {
|
|||
};
|
||||
|
||||
/// Define collection type once and for all - also used in wrappers
|
||||
typedef FastVector<Key> KeyVector;
|
||||
using KeyVector = FastVector<Key>;
|
||||
|
||||
// TODO(frank): Nothing fast about these :-(
|
||||
typedef FastList<Key> KeyList;
|
||||
typedef FastSet<Key> KeySet;
|
||||
typedef FastMap<Key, int> KeyGroupMap;
|
||||
using KeyList = FastList<Key>;
|
||||
using KeySet = FastSet<Key>;
|
||||
using KeyGroupMap = FastMap<Key, int>;
|
||||
|
||||
/// Utility function to print one key with optional prefix
|
||||
GTSAM_EXPORT void PrintKey(Key key, const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
GTSAM_EXPORT void PrintKey(
|
||||
Key key, const std::string &s = "",
|
||||
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
|
||||
|
||||
/// Utility function to print sets of keys with optional prefix
|
||||
GTSAM_EXPORT void PrintKeyList(const KeyList& keys, const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
GTSAM_EXPORT void PrintKeyList(
|
||||
const KeyList &keys, const std::string &s = "",
|
||||
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
|
||||
|
||||
/// Utility function to print sets of keys with optional prefix
|
||||
GTSAM_EXPORT void PrintKeyVector(const KeyVector& keys, const std::string& s =
|
||||
"", const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
GTSAM_EXPORT void PrintKeyVector(
|
||||
const KeyVector &keys, const std::string &s = "",
|
||||
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
|
||||
|
||||
/// Utility function to print sets of keys with optional prefix
|
||||
GTSAM_EXPORT void PrintKeySet(const KeySet& keys, const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter);
|
||||
GTSAM_EXPORT void PrintKeySet(
|
||||
const KeySet &keys, const std::string &s = "",
|
||||
const KeyFormatter &keyFormatter = DefaultKeyFormatter);
|
||||
|
||||
// Define Key to be Testable by specializing gtsam::traits
|
||||
template<typename T> struct traits;
|
||||
|
|
|
@ -25,8 +25,12 @@
#include <gtsam/3rdparty/CCOLAMD/Include/ccolamd.h>

#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
#ifdef GTSAM_USE_SYSTEM_METIS
#include <metis.h>
#else
#include <gtsam/3rdparty/metis/include/metis.h>
#endif
#endif

using namespace std;
@ -16,9 +16,9 @@ virtual class Base {
|
|||
};
|
||||
|
||||
virtual class Gaussian : gtsam::noiseModel::Base {
|
||||
static gtsam::noiseModel::Gaussian* Information(Matrix R);
|
||||
static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R);
|
||||
static gtsam::noiseModel::Gaussian* Covariance(Matrix R);
|
||||
static gtsam::noiseModel::Gaussian* Information(Matrix R, bool smart = true);
|
||||
static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R, bool smart = true);
|
||||
static gtsam::noiseModel::Gaussian* Covariance(Matrix R, bool smart = true);
|
||||
|
||||
bool equals(gtsam::noiseModel::Base& expected, double tol);
|
||||
|
||||
|
@ -37,9 +37,9 @@ virtual class Gaussian : gtsam::noiseModel::Base {
|
|||
};
|
||||
|
||||
virtual class Diagonal : gtsam::noiseModel::Gaussian {
|
||||
static gtsam::noiseModel::Diagonal* Sigmas(Vector sigmas);
|
||||
static gtsam::noiseModel::Diagonal* Variances(Vector variances);
|
||||
static gtsam::noiseModel::Diagonal* Precisions(Vector precisions);
|
||||
static gtsam::noiseModel::Diagonal* Sigmas(Vector sigmas, bool smart = true);
|
||||
static gtsam::noiseModel::Diagonal* Variances(Vector variances, bool smart = true);
|
||||
static gtsam::noiseModel::Diagonal* Precisions(Vector precisions, bool smart = true);
|
||||
Matrix R() const;
|
||||
|
||||
// access to noise model
|
||||
|
@ -69,9 +69,9 @@ virtual class Constrained : gtsam::noiseModel::Diagonal {
|
|||
};
|
||||
|
||||
virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
||||
static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma);
|
||||
static gtsam::noiseModel::Isotropic* Variance(size_t dim, double varianace);
|
||||
static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision);
|
||||
static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma, bool smart = true);
|
||||
static gtsam::noiseModel::Isotropic* Variance(size_t dim, double varianace, bool smart = true);
|
||||
static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision, bool smart = true);
|
||||
|
||||
// access to noise model
|
||||
double sigma() const;
|
||||
|
@ -289,6 +289,13 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
|
|||
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
|
||||
Vector b, const gtsam::noiseModel::Diagonal* model);
|
||||
JacobianFactor(const gtsam::GaussianFactorGraph& graph);
|
||||
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
|
||||
const gtsam::VariableSlots& p_variableSlots);
|
||||
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
|
||||
const gtsam::Ordering& ordering);
|
||||
JacobianFactor(const gtsam::GaussianFactorGraph& graph,
|
||||
const gtsam::Ordering& ordering,
|
||||
const gtsam::VariableSlots& p_variableSlots);
|
||||
|
||||
//Testable
|
||||
void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
|
||||
|
|
|
@ -819,7 +819,6 @@ struct ImuFactorMergeTest {
|
|||
loop_(Vector3(0, -kAngularVelocity, 0), Vector3(kVelocity, 0, 0)) {
|
||||
// arbitrary noise values
|
||||
p_->gyroscopeCovariance = I_3x3 * 0.01;
|
||||
p_->accelerometerCovariance = I_3x3 * 0.02;
|
||||
p_->accelerometerCovariance = I_3x3 * 0.03;
|
||||
}
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ class GTSAM_EXPORT FunctorizedFactor : public NoiseModelFactor1<T> {
|
|||
bool equals(const NonlinearFactor &other, double tol = 1e-9) const override {
|
||||
const FunctorizedFactor<R, T> *e =
|
||||
dynamic_cast<const FunctorizedFactor<R, T> *>(&other);
|
||||
return e && Base::equals(other, tol) &&
|
||||
return e != nullptr && Base::equals(other, tol) &&
|
||||
traits<R>::Equals(this->measured_, e->measured_, tol);
|
||||
}
|
||||
/// @}
|
||||
|
|
|
@ -42,7 +42,7 @@ static double Chi2inv(const double alpha, const size_t dofs) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
template<class GncParameters>
|
||||
class GncOptimizer {
|
||||
class GTSAM_EXPORT GncOptimizer {
|
||||
public:
|
||||
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
|
||||
typedef typename GncParameters::OptimizerType BaseOptimizer;
|
||||
|
|
|
@ -39,7 +39,7 @@ enum GncLossType {
|
|||
};
|
||||
|
||||
template<class BaseOptimizerParameters>
|
||||
class GncParams {
|
||||
class GTSAM_EXPORT GncParams {
|
||||
public:
|
||||
/// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
|
||||
typedef typename BaseOptimizerParameters::OptimizerType OptimizerType;
|
||||
|
|
|
@ -66,12 +66,9 @@ private:
|
|||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Function that compares two values
|
||||
*/
|
||||
typedef std::function<bool(const T&, const T&)> CompareFunction;
|
||||
/// Function that compares two values.
|
||||
using CompareFunction = std::function<bool(const T&, const T&)>;
|
||||
CompareFunction compare_;
|
||||
// bool (*compare_)(const T& a, const T& b);
|
||||
|
||||
/// Default constructor - only for serialization
|
||||
NonlinearEquality() {
|
||||
|
@ -198,9 +195,8 @@ private:
|
|||
};
|
||||
// \class NonlinearEquality
|
||||
|
||||
template<typename VALUE>
|
||||
struct traits<NonlinearEquality<VALUE> > : Testable<NonlinearEquality<VALUE> > {
|
||||
};
|
||||
template <typename VALUE>
|
||||
struct traits<NonlinearEquality<VALUE>> : Testable<NonlinearEquality<VALUE>> {};
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
|
@ -285,33 +281,28 @@ private:
|
|||
};
|
||||
// \NonlinearEquality1
|
||||
|
||||
template<typename VALUE>
|
||||
struct traits<NonlinearEquality1<VALUE> > : Testable<NonlinearEquality1<VALUE> > {
|
||||
};
|
||||
template <typename VALUE>
|
||||
struct traits<NonlinearEquality1<VALUE> >
|
||||
: Testable<NonlinearEquality1<VALUE> > {};
|
||||
|
||||
/* ************************************************************************* */
|
||||
/**
|
||||
* Simple binary equality constraint - this constraint forces two variables to
|
||||
* be the same.
|
||||
*/
|
||||
template<class VALUE>
|
||||
class NonlinearEquality2: public NoiseModelFactor2<VALUE, VALUE> {
|
||||
public:
|
||||
typedef VALUE X;
|
||||
template <class T>
|
||||
class NonlinearEquality2 : public NoiseModelFactor2<T, T> {
|
||||
protected:
|
||||
using Base = NoiseModelFactor2<T, T>;
|
||||
using This = NonlinearEquality2<T>;
|
||||
|
||||
protected:
|
||||
typedef NoiseModelFactor2<VALUE, VALUE> Base;
|
||||
typedef NonlinearEquality2<VALUE> This;
|
||||
|
||||
GTSAM_CONCEPT_MANIFOLD_TYPE(X)
|
||||
GTSAM_CONCEPT_MANIFOLD_TYPE(T)
|
||||
|
||||
/// Default constructor to allow for serialization
|
||||
NonlinearEquality2() {
|
||||
}
|
||||
NonlinearEquality2() {}
|
||||
|
||||
public:
|
||||
|
||||
typedef boost::shared_ptr<NonlinearEquality2<VALUE> > shared_ptr;
|
||||
public:
|
||||
typedef boost::shared_ptr<NonlinearEquality2<T>> shared_ptr;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
|
@ -319,11 +310,10 @@ public:
|
|||
* @param key2 the key for the second unknown variable to be constrained
|
||||
* @param mu a parameter which really turns this into a strong prior
|
||||
*/
|
||||
NonlinearEquality2(Key key1, Key key2, double mu = 1000.0) :
|
||||
Base(noiseModel::Constrained::All(traits<X>::dimension, std::abs(mu)), key1, key2) {
|
||||
}
|
||||
~NonlinearEquality2() override {
|
||||
}
|
||||
NonlinearEquality2(Key key1, Key key2, double mu = 1e4)
|
||||
: Base(noiseModel::Constrained::All(traits<T>::dimension, std::abs(mu)),
|
||||
key1, key2) {}
|
||||
~NonlinearEquality2() override {}
|
||||
|
||||
/// @return a deep copy of this factor
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
|
@ -332,32 +322,30 @@ public:
|
|||
}
|
||||
|
||||
/// g(x) with optional derivative2
|
||||
Vector evaluateError(const X& x1, const X& x2, boost::optional<Matrix&> H1 =
|
||||
boost::none, boost::optional<Matrix&> H2 = boost::none) const override {
|
||||
static const size_t p = traits<X>::dimension;
|
||||
if (H1) *H1 = -Matrix::Identity(p,p);
|
||||
if (H2) *H2 = Matrix::Identity(p,p);
|
||||
return traits<X>::Local(x1,x2);
|
||||
Vector evaluateError(
|
||||
const T& x1, const T& x2, boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const override {
|
||||
static const size_t p = traits<T>::dimension;
|
||||
if (H1) *H1 = -Matrix::Identity(p, p);
|
||||
if (H2) *H2 = Matrix::Identity(p, p);
|
||||
return traits<T>::Local(x1, x2);
|
||||
}
|
||||
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
private:
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||
ar
|
||||
& boost::serialization::make_nvp("NoiseModelFactor2",
|
||||
boost::serialization::base_object<Base>(*this));
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& boost::serialization::make_nvp(
|
||||
"NoiseModelFactor2", boost::serialization::base_object<Base>(*this));
|
||||
}
|
||||
};
|
||||
// \NonlinearEquality2
|
||||
|
||||
template<typename VALUE>
|
||||
struct traits<NonlinearEquality2<VALUE> > : Testable<NonlinearEquality2<VALUE> > {
|
||||
template <typename VALUE>
|
||||
struct traits<NonlinearEquality2<VALUE>> : Testable<NonlinearEquality2<VALUE>> {
|
||||
};
|
||||
|
||||
|
||||
}// namespace gtsam
|
||||
|
|
|
@ -75,12 +75,15 @@ size_t Z(size_t j);
|
|||
} // namespace symbol_shorthand
|
||||
|
||||
// Default keyformatter
|
||||
void PrintKeyList(const gtsam::KeyList& keys);
|
||||
void PrintKeyList(const gtsam::KeyList& keys, string s);
|
||||
void PrintKeyVector(const gtsam::KeyVector& keys);
|
||||
void PrintKeyVector(const gtsam::KeyVector& keys, string s);
|
||||
void PrintKeySet(const gtsam::KeySet& keys);
|
||||
void PrintKeySet(const gtsam::KeySet& keys, string s);
|
||||
void PrintKeyList(
|
||||
const gtsam::KeyList& keys, const string& s = "",
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
void PrintKeyVector(
|
||||
const gtsam::KeyVector& keys, const string& s = "",
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
void PrintKeySet(
|
||||
const gtsam::KeySet& keys, const string& s = "",
|
||||
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter);
|
||||
|
||||
#include <gtsam/inference/LabeledSymbol.h>
|
||||
class LabeledSymbol {
|
||||
|
@ -522,6 +525,24 @@ virtual class DoglegParams : gtsam::NonlinearOptimizerParams {
|
|||
void setVerbosityDL(string verbosityDL) const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/GncParams.h>
|
||||
template<PARAMS>
|
||||
virtual class GncParams {
|
||||
GncParams(const PARAMS& baseOptimizerParams);
|
||||
GncParams();
|
||||
void setVerbosityGNC(const This::Verbosity value);
|
||||
void print(const string& str) const;
|
||||
|
||||
enum Verbosity {
|
||||
SILENT,
|
||||
SUMMARY,
|
||||
VALUES
|
||||
};
|
||||
};
|
||||
|
||||
typedef gtsam::GncParams<gtsam::GaussNewtonParams> GncGaussNewtonParams;
|
||||
typedef gtsam::GncParams<gtsam::LevenbergMarquardtParams> GncLMParams;
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearOptimizer.h>
|
||||
virtual class NonlinearOptimizer {
|
||||
gtsam::Values optimize();
|
||||
|
@ -551,6 +572,18 @@ virtual class DoglegOptimizer : gtsam::NonlinearOptimizer {
|
|||
const gtsam::DoglegParams& params);
|
||||
double getDelta() const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/GncOptimizer.h>
|
||||
template<PARAMS>
|
||||
virtual class GncOptimizer {
|
||||
GncOptimizer(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& initialValues,
|
||||
const PARAMS& params);
|
||||
gtsam::Values optimize();
|
||||
};
|
||||
|
||||
typedef gtsam::GncOptimizer<gtsam::GncParams<gtsam::GaussNewtonParams>> GncGaussNewtonOptimizer;
|
||||
typedef gtsam::GncOptimizer<gtsam::GncParams<gtsam::LevenbergMarquardtParams>> GncLMOptimizer;
|
||||
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
virtual class LevenbergMarquardtOptimizer : gtsam::NonlinearOptimizer {
|
||||
|
@ -801,4 +834,17 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor {
|
|||
void serialize() const;
|
||||
};
|
||||
|
||||
template <T = {gtsam::Point2, gtsam::StereoPoint2, gtsam::Point3, gtsam::Rot2,
|
||||
gtsam::SO3, gtsam::SO4, gtsam::SOn, gtsam::Rot3, gtsam::Pose2,
|
||||
gtsam::Pose3, gtsam::Cal3_S2, gtsam::CalibratedCamera,
|
||||
gtsam::PinholeCamera<gtsam::Cal3_S2>,
|
||||
gtsam::PinholeCamera<gtsam::Cal3Bundler>,
|
||||
gtsam::PinholeCamera<gtsam::Cal3Fisheye>,
|
||||
gtsam::PinholeCamera<gtsam::Cal3Unified>,
|
||||
gtsam::imuBias::ConstantBias}>
|
||||
virtual class NonlinearEquality2 : gtsam::NoiseModelFactor {
|
||||
NonlinearEquality2(Key key1, Key key2, double mu = 1e4);
|
||||
gtsam::Vector evaluateError(const T& x1, const T& x2);
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -20,8 +20,12 @@
|
|||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <gtsam/basis/Basis.h>
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/FunctorizedFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
|
||||
using namespace std;
|
||||
|
@ -60,7 +64,7 @@ class ProjectionFunctor {
|
|||
if (H1) {
|
||||
H1->resize(x.size(), A.size());
|
||||
*H1 << I_3x3, I_3x3, I_3x3;
|
||||
}
|
||||
}
|
||||
if (H2) *H2 = A;
|
||||
return A * x;
|
||||
}
|
||||
|
@ -255,18 +259,148 @@ TEST(FunctorizedFactor, Lambda2) {
|
|||
if (H1) {
|
||||
H1->resize(x.size(), A.size());
|
||||
*H1 << I_3x3, I_3x3, I_3x3;
|
||||
}
|
||||
}
|
||||
if (H2) *H2 = A;
|
||||
return A * x;
|
||||
};
|
||||
// FunctorizedFactor<Matrix> factor(key, measurement, model, lambda);
|
||||
auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement, model2, lambda);
|
||||
auto factor = MakeFunctorizedFactor2<Matrix, Vector>(keyA, keyx, measurement,
|
||||
model2, lambda);
|
||||
|
||||
Vector error = factor.evaluateError(A, x);
|
||||
|
||||
EXPECT(assert_equal(Vector::Zero(3), error, 1e-9));
|
||||
}
|
||||
|
||||
const size_t N = 2;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(FunctorizedFactor, Print2) {
|
||||
const size_t M = 1;
|
||||
|
||||
Vector measured = Vector::Ones(M) * 42;
|
||||
|
||||
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
|
||||
VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);
|
||||
|
||||
string expected =
|
||||
" keys = { X0 }\n"
|
||||
" noise model: unit (1) \n"
|
||||
"FunctorizedFactor(X0)\n"
|
||||
" measurement: [\n"
|
||||
" 42\n"
|
||||
"]\n"
|
||||
" noise model sigmas: 1\n";
|
||||
|
||||
EXPECT(assert_print_equal(expected, priorFactor));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(FunctorizedFactor, VectorEvaluationFactor) {
|
||||
const size_t M = 4;
|
||||
|
||||
Vector measured = Vector::Zero(M);
|
||||
|
||||
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
|
||||
VectorEvaluationFactor<Chebyshev2, M> priorFactor(key, measured, model, N, 0);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(priorFactor);
|
||||
|
||||
ParameterMatrix<M> stateMatrix(N);
|
||||
|
||||
Values initial;
|
||||
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.verbosity = NonlinearOptimizerParams::SILENT;
|
||||
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(FunctorizedFactor, VectorComponentFactor) {
|
||||
const int P = 4;
|
||||
const size_t i = 2;
|
||||
const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
|
||||
auto model = noiseModel::Isotropic::Sigma(1, 1.0);
|
||||
VectorComponentFactor<Chebyshev2, P> controlPrior(key, measured, model, N, i,
|
||||
t, a, b);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(controlPrior);
|
||||
|
||||
ParameterMatrix<P> stateMatrix(N);
|
||||
|
||||
Values initial;
|
||||
initial.insert<ParameterMatrix<P>>(key, stateMatrix);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.verbosity = NonlinearOptimizerParams::SILENT;
|
||||
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(FunctorizedFactor, VecDerivativePrior) {
|
||||
const size_t M = 4;
|
||||
|
||||
Vector measured = Vector::Zero(M);
|
||||
auto model = noiseModel::Isotropic::Sigma(M, 1.0);
|
||||
VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(vecDPrior);
|
||||
|
||||
ParameterMatrix<M> stateMatrix(N);
|
||||
|
||||
Values initial;
|
||||
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.verbosity = NonlinearOptimizerParams::SILENT;
|
||||
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(FunctorizedFactor, ComponentDerivativeFactor) {
|
||||
const size_t M = 4;
|
||||
|
||||
double measured = 0;
|
||||
auto model = noiseModel::Isotropic::Sigma(1, 1.0);
|
||||
ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
|
||||
N, 0, 0);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(controlDPrior);
|
||||
|
||||
Values initial;
|
||||
ParameterMatrix<M> stateMatrix(N);
|
||||
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.verbosity = NonlinearOptimizerParams::SILENT;
|
||||
parameters.verbosityLM = LevenbergMarquardtParams::SILENT;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file matlab.h
|
||||
* @file utilities.h
|
||||
* @brief Contains *generic* global functions designed particularly for the matlab interface
|
||||
* @author Stephen Williams
|
||||
*/
|
||||
|
@ -89,21 +89,41 @@ KeySet createKeySet(std::string s, const Vector& I) {
|
|||
|
||||
/// Extract all Point2 values into a single matrix [x y]
|
||||
Matrix extractPoint2(const Values& values) {
|
||||
Values::ConstFiltered<gtsam::Point2> points = values.filter<gtsam::Point2>();
|
||||
// Point2 is aliased as a gtsam::Vector in the wrapper
|
||||
Values::ConstFiltered<gtsam::Vector> points2 = values.filter<gtsam::Vector>();
|
||||
|
||||
Matrix result(points.size() + points2.size(), 2);
|
||||
|
||||
size_t j = 0;
|
||||
Values::ConstFiltered<Point2> points = values.filter<Point2>();
|
||||
Matrix result(points.size(), 2);
|
||||
for(const auto& key_value: points)
|
||||
for (const auto& key_value : points) {
|
||||
result.row(j++) = key_value.value;
|
||||
}
|
||||
for (const auto& key_value : points2) {
|
||||
if (key_value.value.rows() == 2) {
|
||||
result.row(j++) = key_value.value;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/// Extract all Point3 values into a single matrix [x y z]
|
||||
Matrix extractPoint3(const Values& values) {
|
||||
Values::ConstFiltered<Point3> points = values.filter<Point3>();
|
||||
Matrix result(points.size(), 3);
|
||||
Values::ConstFiltered<gtsam::Point3> points = values.filter<gtsam::Point3>();
|
||||
// Point3 is aliased as a gtsam::Vector in the wrapper
|
||||
Values::ConstFiltered<gtsam::Vector> points2 = values.filter<gtsam::Vector>();
|
||||
|
||||
Matrix result(points.size() + points2.size(), 3);
|
||||
|
||||
size_t j = 0;
|
||||
for(const auto& key_value: points)
|
||||
for (const auto& key_value : points) {
|
||||
result.row(j++) = key_value.value;
|
||||
}
|
||||
for (const auto& key_value : points2) {
|
||||
if (key_value.value.rows() == 3) {
|
||||
result.row(j++) = key_value.value;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -144,11 +164,18 @@ Matrix extractPose3(const Values& values) {
|
|||
|
||||
/// Perturb all Point2 values using normally distributed noise
|
||||
void perturbPoint2(Values& values, double sigma, int32_t seed = 42u) {
|
||||
noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(2,
|
||||
sigma);
|
||||
noiseModel::Isotropic::shared_ptr model =
|
||||
noiseModel::Isotropic::Sigma(2, sigma);
|
||||
Sampler sampler(model, seed);
|
||||
for(const auto& key_value: values.filter<Point2>()) {
|
||||
values.update<Point2>(key_value.key, key_value.value + Point2(sampler.sample()));
|
||||
for (const auto& key_value : values.filter<Point2>()) {
|
||||
values.update<Point2>(key_value.key,
|
||||
key_value.value + Point2(sampler.sample()));
|
||||
}
|
||||
for (const auto& key_value : values.filter<gtsam::Vector>()) {
|
||||
if (key_value.value.rows() == 2) {
|
||||
values.update<gtsam::Vector>(key_value.key,
|
||||
key_value.value + Point2(sampler.sample()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -165,19 +192,34 @@ void perturbPose2(Values& values, double sigmaT, double sigmaR, int32_t seed =
|
|||
|
||||
/// Perturb all Point3 values using normally distributed noise
|
||||
void perturbPoint3(Values& values, double sigma, int32_t seed = 42u) {
|
||||
noiseModel::Isotropic::shared_ptr model = noiseModel::Isotropic::Sigma(3,
|
||||
sigma);
|
||||
noiseModel::Isotropic::shared_ptr model =
|
||||
noiseModel::Isotropic::Sigma(3, sigma);
|
||||
Sampler sampler(model, seed);
|
||||
for(const auto& key_value: values.filter<Point3>()) {
|
||||
values.update<Point3>(key_value.key, key_value.value + Point3(sampler.sample()));
|
||||
for (const auto& key_value : values.filter<Point3>()) {
|
||||
values.update<Point3>(key_value.key,
|
||||
key_value.value + Point3(sampler.sample()));
|
||||
}
|
||||
for (const auto& key_value : values.filter<gtsam::Vector>()) {
|
||||
if (key_value.value.rows() == 3) {
|
||||
values.update<gtsam::Vector>(key_value.key,
|
||||
key_value.value + Point3(sampler.sample()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a number of initial point values by backprojecting
|
||||
/**
|
||||
* @brief Insert a number of initial point values by backprojecting
|
||||
*
|
||||
* @param values The values dict to insert the backprojections to.
|
||||
* @param camera The camera model.
|
||||
* @param J Vector of key indices.
|
||||
* @param Z 2*J matrix of pixel values.
|
||||
* @param depth Initial depth value.
|
||||
*/
|
||||
void insertBackprojections(Values& values, const PinholeCamera<Cal3_S2>& camera,
|
||||
const Vector& J, const Matrix& Z, double depth) {
|
||||
if (Z.rows() != 2)
|
||||
throw std::invalid_argument("insertBackProjections: Z must be 2*K");
|
||||
throw std::invalid_argument("insertBackProjections: Z must be 2*J");
|
||||
if (Z.cols() != J.size())
|
||||
throw std::invalid_argument(
|
||||
"insertBackProjections: J and Z must have same number of entries");
|
||||
|
@ -188,7 +230,17 @@ void insertBackprojections(Values& values, const PinholeCamera<Cal3_S2>& camera,
|
|||
}
|
||||
}
|
||||
|
||||
/// Insert multiple projection factors for a single pose key
|
||||
/**
|
||||
* @brief Insert multiple projection factors for a single pose key
|
||||
*
|
||||
* @param graph The nonlinear factor graph to add the factors to.
|
||||
* @param i Camera key.
|
||||
* @param J Vector of key indices.
|
||||
* @param Z 2*J matrix of pixel values.
|
||||
* @param model Factor noise model.
|
||||
* @param K Calibration matrix.
|
||||
* @param body_P_sensor Pose of the camera sensor in the body frame.
|
||||
*/
|
||||
void insertProjectionFactors(NonlinearFactorGraph& graph, Key i,
|
||||
const Vector& J, const Matrix& Z, const SharedNoiseModel& model,
|
||||
const Cal3_S2::shared_ptr K, const Pose3& body_P_sensor = Pose3()) {
|
||||
|
|
|
@ -103,7 +103,7 @@ namespace gtsam {
boost::none, boost::optional<Matrix&> H2 = boost::none) const override {
T hx = traits<T>::Between(p1, p2, H1, H2); // h(x)
// manifold equivalent of h(x)-z -> log(z,h(x))
#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR
#ifdef GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
typename traits<T>::ChartJacobian::Jacobian Hlocal;
Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0);
if (H1) *H1 = Hlocal * (*H1);
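A note on the renamed guard: when GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR is defined, the residual is the local coordinate of the prediction hx in the chart at the measurement, and the extra Hlocal factor chain-rules the Jacobians of h into Jacobians of the residual; without the guard the Jacobians of h are returned unmodified. For a Lie group type T this reads roughly as

r(x_1, x_2) = \log\!\big(z^{-1}\, h(x_1, x_2)\big), \qquad
\frac{\partial r}{\partial x_i} = H_{\text{local}} \,\frac{\partial h}{\partial x_i}.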
@ -164,10 +164,15 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** return the calibration object */
|
||||
inline const boost::shared_ptr<CALIBRATION> calibration() const {
|
||||
const boost::shared_ptr<CALIBRATION> calibration() const {
|
||||
return K_;
|
||||
}
|
||||
|
||||
/** return the (optional) sensor pose with respect to the vehicle frame */
|
||||
const boost::optional<POSE>& body_P_sensor() const {
|
||||
return body_P_sensor_;
|
||||
}
|
||||
|
||||
/** return verbosity */
|
||||
inline bool verboseCheirality() const { return verboseCheirality_; }
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ protected:
|
|||
DefaultKeyFormatter) const override {
|
||||
std::cout << s << "SmartFactorBase, z = \n";
|
||||
for (size_t k = 0; k < measured_.size(); ++k) {
|
||||
std::cout << "measurement, p = " << measured_[k] << "\t";
|
||||
std::cout << "measurement " << k<<", px = \n" << measured_[k] << "\n";
|
||||
noiseModel_->print("noise model = ");
|
||||
}
|
||||
if(body_P_sensor_)
|
||||
|
|
|
@ -101,7 +101,7 @@ public:
|
|||
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
|
||||
DefaultKeyFormatter) const override {
|
||||
std::cout << s << "SmartProjectionFactor\n";
|
||||
std::cout << "linearizationMode:\n" << params_.linearizationMode
|
||||
std::cout << "linearizationMode: " << params_.linearizationMode
|
||||
<< std::endl;
|
||||
std::cout << "triangulationParameters:\n" << params_.triangulation
|
||||
<< std::endl;
|
||||
|
|
|
@ -138,4 +138,21 @@ Point2_ uncalibrate(const Expression<CALIBRATION>& K, const Point2_& xy_hat) {
|
|||
return Point2_(K, &CALIBRATION::uncalibrate, xy_hat);
|
||||
}
|
||||
|
||||
|
||||
/// logmap
|
||||
// TODO(dellaert): Should work but fails because of a type deduction conflict.
|
||||
// template <typename T>
|
||||
// gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
|
||||
// const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
|
||||
// return gtsam::Expression<typename gtsam::traits<T>::TangentVector>(
|
||||
// x1, &T::logmap, x2);
|
||||
// }
|
||||
|
||||
template <typename T>
|
||||
gtsam::Expression<typename gtsam::traits<T>::TangentVector> logmap(
|
||||
const gtsam::Expression<T> &x1, const gtsam::Expression<T> &x2) {
|
||||
return Expression<typename gtsam::traits<T>::TangentVector>(
|
||||
gtsam::traits<T>::Logmap, between(x1, x2));
|
||||
}
|
||||
|
||||
} // \namespace gtsam
|
||||
|
|
|
@ -58,6 +58,13 @@ TEST(SlamExpressions, unrotate) {
|
|||
const Point3_ q_ = unrotate(R_, p_);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SlamExpressions, logmap) {
|
||||
Pose3_ T1_(0);
|
||||
Pose3_ T2_(1);
|
||||
const Vector6_ l = logmap(T1_, T2_);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
@ -50,7 +50,7 @@ static Point2 measurement1(323.0, 240.0);
|
|||
|
||||
LevenbergMarquardtParams lmParams;
|
||||
// Make more verbose like so (in tests):
|
||||
// params.verbosityLM = LevenbergMarquardtParams::SUMMARY;
|
||||
// lmParams.verbosityLM = LevenbergMarquardtParams::SUMMARY;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( SmartProjectionPoseFactor, Constructor) {
|
||||
@ -20,6 +20,8 @@
|
|||
|
||||
#include "FindSeparator.h"
|
||||
|
||||
#ifndef GTSAM_USE_SYSTEM_METIS
|
||||
|
||||
extern "C" {
|
||||
#include <metis.h>
|
||||
#include "metislib.h"
|
||||
|
@ -564,3 +566,5 @@ namespace gtsam { namespace partition {
|
|||
}
|
||||
|
||||
}} //namespace
|
||||
|
||||
#endif
|
||||
@ -20,6 +20,8 @@ using namespace std;
|
|||
using namespace gtsam;
|
||||
using namespace gtsam::partition;
|
||||
|
||||
#ifndef GTSAM_USE_SYSTEM_METIS
|
||||
|
||||
/* ************************************************************************* */
|
||||
// x0 - x1 - x2
|
||||
// l3 l4
|
||||
|
@ -227,6 +229,8 @@ TEST ( Partition, findSeparator3_with_reduced_camera )
|
|||
LONGS_EQUAL(2, partitionTable[28]);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
|
||||
/* ************************************************************************* */
|
||||
@ -0,0 +1,64 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file ProjectionFactorRollingShutter.cpp
|
||||
* @brief Basic projection factor for rolling shutter cameras
|
||||
* @author Yotam Stern
|
||||
*/
|
||||
|
||||
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
Vector ProjectionFactorRollingShutter::evaluateError(
|
||||
const Pose3& pose_a, const Pose3& pose_b, const Point3& point,
|
||||
boost::optional<Matrix&> H1, boost::optional<Matrix&> H2,
|
||||
boost::optional<Matrix&> H3) const {
|
||||
try {
|
||||
Pose3 pose = interpolate<Pose3>(pose_a, pose_b, alpha_, H1, H2);
|
||||
gtsam::Matrix Hprj;
|
||||
if (body_P_sensor_) {
|
||||
if (H1 || H2 || H3) {
|
||||
gtsam::Matrix HbodySensor;
|
||||
PinholeCamera<Cal3_S2> camera(
|
||||
pose.compose(*body_P_sensor_, HbodySensor), *K_);
|
||||
Point2 reprojectionError(camera.project(point, Hprj, H3, boost::none) -
|
||||
measured_);
|
||||
if (H1) *H1 = Hprj * HbodySensor * (*H1);
|
||||
if (H2) *H2 = Hprj * HbodySensor * (*H2);
|
||||
return reprojectionError;
|
||||
} else {
|
||||
PinholeCamera<Cal3_S2> camera(pose.compose(*body_P_sensor_), *K_);
|
||||
return camera.project(point) - measured_;
|
||||
}
|
||||
} else {
|
||||
PinholeCamera<Cal3_S2> camera(pose, *K_);
|
||||
Point2 reprojectionError(camera.project(point, Hprj, H3, boost::none) -
|
||||
measured_);
|
||||
if (H1) *H1 = Hprj * (*H1);
|
||||
if (H2) *H2 = Hprj * (*H2);
|
||||
return reprojectionError;
|
||||
}
|
||||
} catch (CheiralityException& e) {
|
||||
if (H1) *H1 = Matrix::Zero(2, 6);
|
||||
if (H2) *H2 = Matrix::Zero(2, 6);
|
||||
if (H3) *H3 = Matrix::Zero(2, 3);
|
||||
if (verboseCheirality_)
|
||||
std::cout << e.what() << ": Landmark "
|
||||
<< DefaultKeyFormatter(this->key2()) << " moved behind camera "
|
||||
<< DefaultKeyFormatter(this->key1()) << std::endl;
|
||||
if (throwCheirality_) throw CheiralityException(this->key2());
|
||||
}
|
||||
return Vector2::Constant(2.0 * K_->fx());
|
||||
}
|
||||
|
||||
} // namespace gtsam
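To summarize the measurement model implemented above, a small test-style sketch (the function name is local to this sketch, and body_P_sensor is omitted for brevity): the prediction is the projection of the landmark from the pose interpolated between the two bracketing poses, and evaluateError returns this prediction minus the stored pixel.

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Pose3.h>

// h(pose_a, pose_b, point) for the simple case without body_P_sensor.
gtsam::Point2 predictRollingShutterPixel(const gtsam::Pose3& pose_a,
                                         const gtsam::Pose3& pose_b,
                                         double alpha, const gtsam::Cal3_S2& K,
                                         const gtsam::Point3& landmark) {
  const gtsam::Pose3 pose =
      gtsam::interpolate<gtsam::Pose3>(pose_a, pose_b, alpha);
  gtsam::PinholeCamera<gtsam::Cal3_S2> camera(pose, K);
  return camera.project(landmark);
}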
@ -0,0 +1,217 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file ProjectionFactorRollingShutter.h
|
||||
* @brief Basic projection factor for rolling shutter cameras
|
||||
* @author Yotam Stern
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/CalibratedCamera.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Non-linear factor for 2D projection measurement obtained using a rolling
|
||||
* shutter camera. The calibration is known here. This version takes rolling
|
||||
* shutter information into account as follows: consider two consecutive poses A
|
||||
* and B, and a Point2 measurement taken starting at time A using a rolling
|
||||
* shutter camera. Pose A has timestamp t_A, and Pose B has timestamp t_B. The
|
||||
* Point2 measurement has timestamp t_p (with t_A <= t_p <= t_B) corresponding
|
||||
* to the time of exposure of the row of the image the pixel belongs to. Let
* us define alpha = (t_p - t_A) / (t_B - t_A); we use the pose interpolated
* between A and B by alpha to project the corresponding landmark to Point2.
|
||||
* @addtogroup SLAM
|
||||
*/
|
||||
|
||||
class ProjectionFactorRollingShutter
|
||||
: public NoiseModelFactor3<Pose3, Pose3, Point3> {
|
||||
protected:
|
||||
// Keep a copy of measurement and calibration for I/O
|
||||
Point2 measured_; ///< 2D measurement
|
||||
double alpha_; ///< interpolation parameter in [0,1] corresponding to the
|
||||
///< point2 measurement
|
||||
boost::shared_ptr<Cal3_S2> K_; ///< shared pointer to calibration object
|
||||
boost::optional<Pose3>
|
||||
body_P_sensor_; ///< The pose of the sensor in the body frame
|
||||
|
||||
// verbosity handling for Cheirality Exceptions
|
||||
bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default:
|
||||
///< false)
|
||||
bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions
|
||||
///< (default: false)
|
||||
|
||||
public:
|
||||
/// shorthand for base class type
|
||||
typedef NoiseModelFactor3<Pose3, Pose3, Point3> Base;
|
||||
|
||||
/// shorthand for this class
|
||||
typedef ProjectionFactorRollingShutter This;
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
||||
/// Default constructor
|
||||
ProjectionFactorRollingShutter()
|
||||
: measured_(0, 0),
|
||||
alpha_(0),
|
||||
throwCheirality_(false),
|
||||
verboseCheirality_(false) {}
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param measured is the 2-dimensional pixel location of point in the image
|
||||
* (the measurement)
|
||||
* @param alpha in [0,1] is the rolling shutter parameter for the measurement
|
||||
* @param model is the noise model
|
||||
* @param poseKey_a is the key of the first camera
|
||||
* @param poseKey_b is the key of the second camera
|
||||
* @param pointKey is the key of the landmark
|
||||
* @param K shared pointer to the constant calibration
|
||||
* @param body_P_sensor is the transform from body to sensor frame (default
|
||||
* identity)
|
||||
*/
|
||||
ProjectionFactorRollingShutter(
|
||||
const Point2& measured, double alpha, const SharedNoiseModel& model,
|
||||
Key poseKey_a, Key poseKey_b, Key pointKey,
|
||||
const boost::shared_ptr<Cal3_S2>& K,
|
||||
boost::optional<Pose3> body_P_sensor = boost::none)
|
||||
: Base(model, poseKey_a, poseKey_b, pointKey),
|
||||
measured_(measured),
|
||||
alpha_(alpha),
|
||||
K_(K),
|
||||
body_P_sensor_(body_P_sensor),
|
||||
throwCheirality_(false),
|
||||
verboseCheirality_(false) {}
|
||||
|
||||
/**
|
||||
* Constructor with exception-handling flags
|
||||
* @param measured is the 2-dimensional pixel location of point in the image
|
||||
* (the measurement)
|
||||
* @param alpha in [0,1] is the rolling shutter parameter for the measurement
|
||||
* @param model is the noise model
|
||||
* @param poseKey_a is the key of the first camera
|
||||
* @param poseKey_b is the key of the second camera
|
||||
* @param pointKey is the key of the landmark
|
||||
* @param K shared pointer to the constant calibration
|
||||
* @param throwCheirality determines whether Cheirality exceptions are
|
||||
* rethrown
|
||||
* @param verboseCheirality determines whether exceptions are printed for
|
||||
* Cheirality
|
||||
* @param body_P_sensor is the transform from body to sensor frame (default
|
||||
* identity)
|
||||
*/
|
||||
ProjectionFactorRollingShutter(
|
||||
const Point2& measured, double alpha, const SharedNoiseModel& model,
|
||||
Key poseKey_a, Key poseKey_b, Key pointKey,
|
||||
const boost::shared_ptr<Cal3_S2>& K, bool throwCheirality,
|
||||
bool verboseCheirality,
|
||||
boost::optional<Pose3> body_P_sensor = boost::none)
|
||||
: Base(model, poseKey_a, poseKey_b, pointKey),
|
||||
measured_(measured),
|
||||
alpha_(alpha),
|
||||
K_(K),
|
||||
body_P_sensor_(body_P_sensor),
|
||||
throwCheirality_(throwCheirality),
|
||||
verboseCheirality_(verboseCheirality) {}
|
||||
|
||||
/** Virtual destructor */
|
||||
virtual ~ProjectionFactorRollingShutter() {}
|
||||
|
||||
/// @return a deep copy of this factor
|
||||
gtsam::NonlinearFactor::shared_ptr clone() const override {
|
||||
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
|
||||
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
|
||||
}
|
||||
|
||||
/**
|
||||
* print
|
||||
* @param s optional string naming the factor
|
||||
* @param keyFormatter optional formatter useful for printing Symbols
|
||||
*/
|
||||
void print(
|
||||
const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
|
||||
std::cout << s << "ProjectionFactorRollingShutter, z = ";
|
||||
traits<Point2>::Print(measured_);
|
||||
std::cout << " rolling shutter interpolation param = " << alpha_;
|
||||
if (this->body_P_sensor_)
|
||||
this->body_P_sensor_->print(" sensor pose in body frame: ");
|
||||
Base::print("", keyFormatter);
|
||||
}
|
||||
|
||||
/// equals
|
||||
bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
|
||||
const This* e = dynamic_cast<const This*>(&p);
|
||||
return e && Base::equals(p, tol) && (alpha_ == e->alpha()) &&
|
||||
traits<Point2>::Equals(this->measured_, e->measured_, tol) &&
|
||||
this->K_->equals(*e->K_, tol) &&
|
||||
(this->throwCheirality_ == e->throwCheirality_) &&
|
||||
(this->verboseCheirality_ == e->verboseCheirality_) &&
|
||||
((!body_P_sensor_ && !e->body_P_sensor_) ||
|
||||
(body_P_sensor_ && e->body_P_sensor_ &&
|
||||
body_P_sensor_->equals(*e->body_P_sensor_)));
|
||||
}
|
||||
|
||||
/// Evaluate error h(x)-z and optionally derivatives
|
||||
Vector evaluateError(
|
||||
const Pose3& pose_a, const Pose3& pose_b, const Point3& point,
|
||||
boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none,
|
||||
boost::optional<Matrix&> H3 = boost::none) const override;
|
||||
|
||||
/** return the measurement */
|
||||
const Point2& measured() const { return measured_; }
|
||||
|
||||
/** return the calibration object */
|
||||
inline const boost::shared_ptr<Cal3_S2> calibration() const { return K_; }
|
||||
|
||||
/** return the rolling shutter interpolation parameter */
|
||||
inline double alpha() const { return alpha_; }
|
||||
|
||||
/** return verbosity */
|
||||
inline bool verboseCheirality() const { return verboseCheirality_; }
|
||||
|
||||
/** return flag for throwing cheirality exceptions */
|
||||
inline bool throwCheirality() const { return throwCheirality_; }
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar& BOOST_SERIALIZATION_NVP(measured_);
|
||||
ar& BOOST_SERIALIZATION_NVP(alpha_);
|
||||
ar& BOOST_SERIALIZATION_NVP(K_);
|
||||
ar& BOOST_SERIALIZATION_NVP(body_P_sensor_);
|
||||
ar& BOOST_SERIALIZATION_NVP(throwCheirality_);
|
||||
ar& BOOST_SERIALIZATION_NVP(verboseCheirality_);
|
||||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/// traits
|
||||
template <>
|
||||
struct traits<ProjectionFactorRollingShutter>
|
||||
: public Testable<ProjectionFactorRollingShutter> {};
|
||||
|
||||
} // namespace gtsam
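A hypothetical usage sketch for the factor declared above; the keys X(1), X(2), L(1), the timestamps and the noise model are placeholders. Alpha is computed from the row exposure time exactly as described in the class comment.

#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>

void addRollingShutterMeasurement(gtsam::NonlinearFactorGraph& graph,
                                  const gtsam::Point2& pixel, double t_A,
                                  double t_B, double t_p,
                                  const gtsam::Cal3_S2::shared_ptr& K,
                                  const gtsam::SharedNoiseModel& model) {
  using gtsam::symbol_shorthand::L;
  using gtsam::symbol_shorthand::X;
  const double alpha = (t_p - t_A) / (t_B - t_A);  // assumed to lie in [0,1]
  graph.emplace_shared<gtsam::ProjectionFactorRollingShutter>(
      pixel, alpha, model, X(1), X(2), L(1), K);
}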
@ -0,0 +1,485 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SmartProjectionPoseFactorRollingShutter.h
|
||||
* @brief Smart projection factor on poses modeling rolling shutter effect with
|
||||
* given readout time
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/slam/SmartProjectionFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
/**
|
||||
*
|
||||
* @addtogroup SLAM
|
||||
*
|
||||
* If you are using the factor, please cite:
|
||||
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert,
|
||||
* Eliminating conditionally independent sets in factor graphs:
|
||||
* a unifying perspective based on smart factors,
|
||||
* Int. Conf. on Robotics and Automation (ICRA), 2014.
|
||||
*/
|
||||
|
||||
/**
|
||||
* This factor optimizes two consecutive poses of the body assuming a rolling
|
||||
* shutter model of the camera with given readout time. The factor requires that
|
||||
* values contain (for each pixel observation) two consecutive camera poses from
|
||||
* which the pixel observation pose can be interpolated.
|
||||
* @addtogroup SLAM
|
||||
*/
|
||||
template <class CAMERA>
|
||||
class SmartProjectionPoseFactorRollingShutter
|
||||
: public SmartProjectionFactor<CAMERA> {
|
||||
public:
|
||||
typedef typename CAMERA::CalibrationType CALIBRATION;
|
||||
|
||||
protected:
|
||||
/// shared pointer to calibration object (one for each observation)
|
||||
std::vector<boost::shared_ptr<CALIBRATION>> K_all_;
|
||||
|
||||
/// The keys of the pose of the body (with respect to an external world
|
||||
/// frame): two consecutive poses for each observation
|
||||
std::vector<std::pair<Key, Key>> world_P_body_key_pairs_;
|
||||
|
||||
/// interpolation factor (one for each observation) to interpolate between
|
||||
/// pair of consecutive poses
|
||||
std::vector<double> alphas_;
|
||||
|
||||
/// Pose of the camera in the body frame
|
||||
std::vector<Pose3> body_P_sensors_;
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
/// shorthand for base class type
|
||||
typedef SmartProjectionFactor<PinholePose<CALIBRATION>> Base;
|
||||
|
||||
/// shorthand for this class
|
||||
typedef SmartProjectionPoseFactorRollingShutter This;
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
||||
static const int DimBlock =
|
||||
12; ///< size of the variable stacking 2 poses from which the observation
|
||||
///< pose is interpolated
|
||||
static const int DimPose = 6; ///< Pose3 dimension
|
||||
static const int ZDim = 2; ///< Measurement dimension (Point2)
|
||||
typedef Eigen::Matrix<double, ZDim, DimBlock>
|
||||
MatrixZD; // F blocks (derivatives wrt block of 2 poses)
|
||||
typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD>>
|
||||
FBlocks; // vector of F blocks
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param sharedNoiseModel isotropic measurement noise
|
||||
* @param params internal parameters of the smart factors
|
||||
*/
|
||||
SmartProjectionPoseFactorRollingShutter(
|
||||
const SharedNoiseModel& sharedNoiseModel,
|
||||
const SmartProjectionParams& params = SmartProjectionParams())
|
||||
: Base(sharedNoiseModel, params) {}
|
||||
|
||||
/** Virtual destructor */
|
||||
~SmartProjectionPoseFactorRollingShutter() override = default;
|
||||
|
||||
/**
|
||||
* add a new measurement, with 2 pose keys, interpolation factor, camera
|
||||
* (intrinsic and extrinsic) calibration, and observed pixel.
|
||||
* @param measured 2-dimensional location of the projection of a single
|
||||
* landmark in a single view (the measurement), interpolated from the 2 poses
|
||||
* @param world_P_body_key1 key corresponding to the first body pose (time <=
|
||||
* time pixel is acquired)
|
||||
* @param world_P_body_key2 key corresponding to the second body pose (time
|
||||
* >= time pixel is acquired)
|
||||
* @param alpha interpolation factor in [0,1], such that if alpha = 0 the
|
||||
* interpolated pose is the same as world_P_body_key1
|
||||
* @param K (fixed) camera intrinsic calibration
|
||||
* @param body_P_sensor (fixed) camera extrinsic calibration
|
||||
*/
|
||||
void add(const Point2& measured, const Key& world_P_body_key1,
|
||||
const Key& world_P_body_key2, const double& alpha,
|
||||
const boost::shared_ptr<CALIBRATION>& K,
|
||||
const Pose3& body_P_sensor = Pose3::identity()) {
|
||||
// store measurements in base class
|
||||
this->measured_.push_back(measured);
|
||||
|
||||
// store the pair of keys for each measurement, in the same order
|
||||
world_P_body_key_pairs_.push_back(
|
||||
std::make_pair(world_P_body_key1, world_P_body_key2));
|
||||
|
||||
// also store keys in the keys_ vector: these keys are assumed to be
|
||||
// unique, so we avoid duplicates here
|
||||
if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key1) ==
|
||||
this->keys_.end())
|
||||
this->keys_.push_back(world_P_body_key1); // add only unique keys
|
||||
if (std::find(this->keys_.begin(), this->keys_.end(), world_P_body_key2) ==
|
||||
this->keys_.end())
|
||||
this->keys_.push_back(world_P_body_key2); // add only unique keys
|
||||
|
||||
// store interpolation factor
|
||||
alphas_.push_back(alpha);
|
||||
|
||||
// store fixed intrinsic calibration
|
||||
K_all_.push_back(K);
|
||||
|
||||
// store fixed extrinsics of the camera
|
||||
body_P_sensors_.push_back(body_P_sensor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Variant of the previous "add" function in which we include multiple
|
||||
* measurements
|
||||
* @param measurements vector of the 2m dimensional location of the projection
|
||||
* of a single landmark in the m views (the measurements)
|
||||
* @param world_P_body_key_pairs vector where the i-th element contains a pair
|
||||
* of keys corresponding to the pair of poses from which the observation pose
|
||||
* for the i-th measurement can be interpolated
|
||||
* @param alphas vector of interpolation params (in [0,1]), one for each
|
||||
* measurement (in the same order)
|
||||
* @param Ks vector of (fixed) intrinsic calibration objects
|
||||
* @param body_P_sensors vector of (fixed) extrinsic calibration objects
|
||||
*/
|
||||
void add(const Point2Vector& measurements,
|
||||
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
|
||||
const std::vector<double>& alphas,
|
||||
const std::vector<boost::shared_ptr<CALIBRATION>>& Ks,
|
||||
const std::vector<Pose3>& body_P_sensors) {
|
||||
assert(world_P_body_key_pairs.size() == measurements.size());
|
||||
assert(world_P_body_key_pairs.size() == alphas.size());
|
||||
assert(world_P_body_key_pairs.size() == Ks.size());
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
add(measurements[i], world_P_body_key_pairs[i].first,
|
||||
world_P_body_key_pairs[i].second, alphas[i], Ks[i],
|
||||
body_P_sensors[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Variant of the previous "add" function in which we include multiple
|
||||
* measurements with the same (intrinsic and extrinsic) calibration
|
||||
* @param measurements vector of the 2m dimensional location of the projection
|
||||
* of a single landmark in the m views (the measurements)
|
||||
* @param world_P_body_key_pairs vector where the i-th element contains a pair
|
||||
* of keys corresponding to the pair of poses from which the observation pose
|
||||
* for the i-th measurement can be interpolated
|
||||
* @param alphas vector of interpolation params (in [0,1]), one for each
|
||||
* measurement (in the same order)
|
||||
* @param K (fixed) camera intrinsic calibration (same for all measurements)
|
||||
* @param body_P_sensor (fixed) camera extrinsic calibration (same for all
|
||||
* measurements)
|
||||
*/
|
||||
void add(const Point2Vector& measurements,
|
||||
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
|
||||
const std::vector<double>& alphas,
|
||||
const boost::shared_ptr<CALIBRATION>& K,
|
||||
const Pose3& body_P_sensor = Pose3::identity()) {
|
||||
assert(world_P_body_key_pairs.size() == measurements.size());
|
||||
assert(world_P_body_key_pairs.size() == alphas.size());
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
add(measurements[i], world_P_body_key_pairs[i].first,
|
||||
world_P_body_key_pairs[i].second, alphas[i], K, body_P_sensor);
|
||||
}
|
||||
}
|
||||
|
||||
/// return the calibration object
|
||||
const std::vector<boost::shared_ptr<CALIBRATION>>& calibration() const {
|
||||
return K_all_;
|
||||
}
|
||||
|
||||
/// return (for each observation) the keys of the pair of poses from which we
|
||||
/// interpolate
|
||||
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs() const {
|
||||
return world_P_body_key_pairs_;
|
||||
}
|
||||
|
||||
/// return the interpolation factors alphas
|
||||
const std::vector<double>& alphas() const { return alphas_; }
|
||||
|
||||
/// return the extrinsic camera calibration body_P_sensors
|
||||
const std::vector<Pose3>& body_P_sensors() const { return body_P_sensors_; }
|
||||
|
||||
/**
|
||||
* print
|
||||
* @param s optional string naming the factor
|
||||
* @param keyFormatter optional formatter useful for printing Symbols
|
||||
*/
|
||||
void print(
|
||||
const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
|
||||
std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n ";
|
||||
for (size_t i = 0; i < K_all_.size(); i++) {
|
||||
std::cout << "-- Measurement nr " << i << std::endl;
|
||||
std::cout << " pose1 key: "
|
||||
<< keyFormatter(world_P_body_key_pairs_[i].first) << std::endl;
|
||||
std::cout << " pose2 key: "
|
||||
<< keyFormatter(world_P_body_key_pairs_[i].second) << std::endl;
|
||||
std::cout << " alpha: " << alphas_[i] << std::endl;
|
||||
body_P_sensors_[i].print("extrinsic calibration:\n");
|
||||
K_all_[i]->print("intrinsic calibration = ");
|
||||
}
|
||||
Base::print("", keyFormatter);
|
||||
}
|
||||
|
||||
/// equals
|
||||
bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
|
||||
const SmartProjectionPoseFactorRollingShutter<CAMERA>* e =
|
||||
dynamic_cast<const SmartProjectionPoseFactorRollingShutter<CAMERA>*>(
|
||||
&p);
|
||||
|
||||
if (!e) return false;

bool keyPairsEqual = true;
|
||||
if (this->world_P_body_key_pairs_.size() ==
|
||||
e->world_P_body_key_pairs().size()) {
|
||||
for (size_t k = 0; k < this->world_P_body_key_pairs_.size(); k++) {
|
||||
const Key key1own = world_P_body_key_pairs_[k].first;
|
||||
const Key key1e = e->world_P_body_key_pairs()[k].first;
|
||||
const Key key2own = world_P_body_key_pairs_[k].second;
|
||||
const Key key2e = e->world_P_body_key_pairs()[k].second;
|
||||
if (!(key1own == key1e) || !(key2own == key2e)) {
|
||||
keyPairsEqual = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
keyPairsEqual = false;
|
||||
}
|
||||
|
||||
bool extrinsicCalibrationEqual = true;
|
||||
if (this->body_P_sensors_.size() == e->body_P_sensors().size()) {
|
||||
for (size_t i = 0; i < this->body_P_sensors_.size(); i++) {
|
||||
if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])) {
|
||||
extrinsicCalibrationEqual = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
extrinsicCalibrationEqual = false;
|
||||
}
|
||||
|
||||
return e && Base::equals(p, tol) && K_all_ == e->calibration() &&
|
||||
alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute jacobian F, E and error vector at a given linearization point
|
||||
* @param values Values structure which must contain camera poses
|
||||
* corresponding to keys involved in this factor
|
||||
* @return Return arguments are the camera jacobians Fs (including the
|
||||
* jacobian with respect to both body poses we interpolate from), the point
|
||||
* Jacobian E, and the error vector b. Note that the jacobians are computed
|
||||
* for a given point.
|
||||
*/
|
||||
void computeJacobiansWithTriangulatedPoint(FBlocks& Fs, Matrix& E, Vector& b,
|
||||
const Values& values) const {
|
||||
if (!this->result_) {
|
||||
throw("computeJacobiansWithTriangulatedPoint");
|
||||
} else { // valid result: compute jacobians
|
||||
size_t numViews = this->measured_.size();
|
||||
E = Matrix::Zero(2 * numViews,
|
||||
3); // a Point2 for each view (point jacobian)
|
||||
b = Vector::Zero(2 * numViews); // a Point2 for each view
|
||||
// intermediate Jacobians
|
||||
Eigen::Matrix<double, ZDim, DimPose> dProject_dPoseCam;
|
||||
Eigen::Matrix<double, DimPose, DimPose> dInterpPose_dPoseBody1,
|
||||
dInterpPose_dPoseBody2, dPoseCam_dInterpPose;
|
||||
Eigen::Matrix<double, ZDim, 3> Ei;
|
||||
|
||||
for (size_t i = 0; i < numViews; i++) { // for each camera/measurement
|
||||
auto w_P_body1 = values.at<Pose3>(world_P_body_key_pairs_[i].first);
|
||||
auto w_P_body2 = values.at<Pose3>(world_P_body_key_pairs_[i].second);
|
||||
double interpolationFactor = alphas_[i];
|
||||
// get interpolated pose:
|
||||
auto w_P_body =
|
||||
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor,
|
||||
dInterpPose_dPoseBody1, dInterpPose_dPoseBody2);
|
||||
auto body_P_cam = body_P_sensors_[i];
|
||||
auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose);
|
||||
PinholeCamera<CALIBRATION> camera(w_P_cam, *K_all_[i]);
|
||||
|
||||
// get jacobians and error vector for current measurement
|
||||
Point2 reprojectionError_i =
|
||||
Point2(camera.project(*this->result_, dProject_dPoseCam, Ei) -
|
||||
this->measured_.at(i));
|
||||
Eigen::Matrix<double, ZDim, DimBlock> J; // 2 x 12
|
||||
J.block(0, 0, ZDim, 6) =
|
||||
dProject_dPoseCam * dPoseCam_dInterpPose *
|
||||
dInterpPose_dPoseBody1; // (2x6) * (6x6) * (6x6)
|
||||
J.block(0, 6, ZDim, 6) =
|
||||
dProject_dPoseCam * dPoseCam_dInterpPose *
|
||||
dInterpPose_dPoseBody2; // (2x6) * (6x6) * (6x6)
|
||||
|
||||
// fit into the output structures
|
||||
Fs.push_back(J);
|
||||
size_t row = 2 * i;
|
||||
b.segment<ZDim>(row) = -reprojectionError_i;
|
||||
E.block<ZDim, 3>(row, 0) = Ei;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// linearize and return a HessianFactor that is an approximation of error(p)
|
||||
boost::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor(
|
||||
const Values& values, const double lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
// we may have multiple observations sharing the same keys (due to the
|
||||
// rolling shutter interpolation), hence the number of unique keys may be
|
||||
// smaller than 2 * nrMeasurements
|
||||
size_t nrUniqueKeys =
|
||||
this->keys_
|
||||
.size(); // note: by construction, keys_ only contains unique keys
|
||||
|
||||
// Create structures for Hessian Factors
|
||||
KeyVector js;
|
||||
std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
|
||||
std::vector<Vector> gs(nrUniqueKeys);
|
||||
|
||||
if (this->measured_.size() !=
|
||||
this->cameras(values).size()) // 1 observation per interpolated camera
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionPoseFactorRollingShutter: "
|
||||
"measured_.size() inconsistent with input");
|
||||
|
||||
// triangulate 3D point at given linearization point
|
||||
this->triangulateSafe(this->cameras(values));
|
||||
|
||||
if (!this->result_) { // failed: return "empty/zero" Hessian
|
||||
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {
|
||||
for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
|
||||
for (Vector& v : gs) v = Vector::Zero(DimPose);
|
||||
return boost::make_shared<RegularHessianFactor<DimPose>>(this->keys_,
|
||||
Gs, gs, 0.0);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionPoseFactorRollingShutter: "
|
||||
"only supported degeneracy mode is ZERO_ON_DEGENERACY");
|
||||
}
|
||||
}
|
||||
// compute Jacobian given triangulated 3D Point
|
||||
FBlocks Fs;
|
||||
Matrix E;
|
||||
Vector b;
|
||||
this->computeJacobiansWithTriangulatedPoint(Fs, E, b, values);
|
||||
|
||||
// Whiten using noise model
|
||||
this->noiseModel_->WhitenSystem(E, b);
|
||||
for (size_t i = 0; i < Fs.size(); i++)
|
||||
Fs[i] = this->noiseModel_->Whiten(Fs[i]);
|
||||
|
||||
Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping);
|
||||
|
||||
// Collect all the key pairs: these are the keys that correspond to the
|
||||
// blocks in Fs (on which we apply the Schur Complement)
|
||||
KeyVector nonuniqueKeys;
|
||||
for (size_t i = 0; i < world_P_body_key_pairs_.size(); i++) {
|
||||
nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).first);
|
||||
nonuniqueKeys.push_back(world_P_body_key_pairs_.at(i).second);
|
||||
}
|
||||
|
||||
// Build augmented Hessian (with last row/column being the information
|
||||
// vector). Note: we need to get the augmented Hessian wrt the unique keys
// in keys_
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys =
|
||||
Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 12, 6>(
|
||||
Fs, E, P, b, nonuniqueKeys, this->keys_);
|
||||
|
||||
return boost::make_shared<RegularHessianFactor<DimPose>>(
|
||||
this->keys_, augmentedHessianUniqueKeys);
|
||||
}
|
||||
|
||||
/**
|
||||
* error calculates the error of the factor.
|
||||
*/
|
||||
double error(const Values& values) const override {
|
||||
if (this->active(values)) {
|
||||
return this->totalReprojectionError(this->cameras(values));
|
||||
} else { // else of active flag
|
||||
return 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect all cameras involved in this factor
|
||||
* @param values Values structure which must contain camera poses
|
||||
* corresponding to keys involved in this factor
|
||||
* @return Cameras
|
||||
*/
|
||||
typename Base::Cameras cameras(const Values& values) const override {
|
||||
size_t numViews = this->measured_.size();
|
||||
assert(numViews == K_all_.size());
|
||||
assert(numViews == alphas_.size());
|
||||
assert(numViews == body_P_sensors_.size());
|
||||
assert(numViews == world_P_body_key_pairs_.size());
|
||||
|
||||
typename Base::Cameras cameras;
|
||||
for (size_t i = 0; i < numViews; i++) { // for each measurement
|
||||
const Pose3& w_P_body1 =
|
||||
values.at<Pose3>(world_P_body_key_pairs_[i].first);
|
||||
const Pose3& w_P_body2 =
|
||||
values.at<Pose3>(world_P_body_key_pairs_[i].second);
|
||||
double interpolationFactor = alphas_[i];
|
||||
const Pose3& w_P_body =
|
||||
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor);
|
||||
const Pose3& body_P_cam = body_P_sensors_[i];
|
||||
const Pose3& w_P_cam = w_P_body.compose(body_P_cam);
|
||||
cameras.emplace_back(w_P_cam, K_all_[i]);
|
||||
}
|
||||
return cameras;
|
||||
}
|
||||
|
||||
/**
|
||||
* Linearize to Gaussian Factor (possibly adding a damping factor Lambda for
|
||||
* LM)
|
||||
* @param values Values structure which must contain camera poses and
|
||||
* extrinsic pose for this factor
|
||||
* @return a Gaussian factor
|
||||
*/
|
||||
boost::shared_ptr<GaussianFactor> linearizeDamped(
|
||||
const Values& values, const double lambda = 0.0) const {
|
||||
// depending on flag set on construction we may linearize to different
|
||||
// linear factors
|
||||
switch (this->params_.linearizationMode) {
|
||||
case HESSIAN:
|
||||
return this->createHessianFactor(values, lambda);
|
||||
default:
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionPoseFactorRollingShutter: unknown linearization "
|
||||
"mode");
|
||||
}
|
||||
}
|
||||
|
||||
/// linearize
|
||||
boost::shared_ptr<GaussianFactor> linearize(
|
||||
const Values& values) const override {
|
||||
return this->linearizeDamped(values);
|
||||
}
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar& BOOST_SERIALIZATION_NVP(K_all_);
|
||||
}
|
||||
};
|
||||
// end of class declaration
|
||||
|
||||
/// traits
|
||||
template <class CAMERA>
|
||||
struct traits<SmartProjectionPoseFactorRollingShutter<CAMERA>>
|
||||
: public Testable<SmartProjectionPoseFactorRollingShutter<CAMERA>> {};
|
||||
|
||||
} // namespace gtsam
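A hypothetical construction sketch for the smart factor above; the camera type PinholePose<Cal3_S2>, the keys and the include path are assumptions, not taken from the diff. One factor is created per landmark, and each pixel observation is attached to the pair of consecutive body poses it is interpolated from.

#include <boost/make_shared.hpp>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholePose.h>
#include <gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h>

typedef gtsam::SmartProjectionPoseFactorRollingShutter<
    gtsam::PinholePose<gtsam::Cal3_S2>> SmartFactorRS;

boost::shared_ptr<SmartFactorRS> makeSmartFactorRS(
    const gtsam::SharedNoiseModel& model, const gtsam::Point2& pixel,
    gtsam::Key pose1, gtsam::Key pose2, double alpha,
    const boost::shared_ptr<gtsam::Cal3_S2>& K,
    const gtsam::Pose3& body_P_sensor) {
  auto factor = boost::make_shared<SmartFactorRS>(model);
  factor->add(pixel, pose1, pose2, alpha, K, body_P_sensor);
  return factor;
}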
@ -61,10 +61,10 @@ class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
|
|||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
||||
static const int Dim = 12; ///< Camera dimension: 6 for body pose, 6 for extrinsic pose
|
||||
static const int DimBlock = 12; ///< Camera dimension: 6 for body pose, 6 for extrinsic pose
|
||||
static const int DimPose = 6; ///< Pose3 dimension
|
||||
static const int ZDim = 3; ///< Measurement dimension (for a StereoPoint2 measurement)
|
||||
typedef Eigen::Matrix<double, ZDim, Dim> MatrixZD; // F blocks (derivatives wrt camera)
|
||||
typedef Eigen::Matrix<double, ZDim, DimBlock> MatrixZD; // F blocks (derivatives wrt camera)
|
||||
typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> > FBlocks; // vector of F blocks
|
||||
|
||||
/**
|
||||
|
@ -180,7 +180,7 @@ class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
|
|||
// get jacobians and error vector for current measurement
|
||||
StereoPoint2 reprojectionError_i = StereoPoint2(
|
||||
camera.project(*result_, dProject_dPoseCam_i, Ei) - measured_.at(i));
|
||||
Eigen::Matrix<double, ZDim, Dim> J; // 3 x 12
|
||||
Eigen::Matrix<double, ZDim, DimBlock> J; // 3 x 12
|
||||
J.block<ZDim, 6>(0, 0) = dProject_dPoseCam_i * dPoseCam_dPoseBody_i; // (3x6) * (6x6)
|
||||
J.block<ZDim, 6>(0, 6) = dProject_dPoseCam_i * dPoseCam_dPoseExt_i; // (3x6) * (6x6)
|
||||
// if the right pixel is invalid, fix jacobians
|
||||
|
@ -209,8 +209,6 @@ class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
|
|||
// of keys may be smaller than 2 * nrMeasurements (which is the upper bound where we
|
||||
// have a body key and an extrinsic calibration key for each measurement)
|
||||
size_t nrUniqueKeys = keys_.size();
|
||||
size_t nrNonuniqueKeys = world_P_body_keys_.size()
|
||||
+ body_P_cam_keys_.size();
|
||||
|
||||
// Create structures for Hessian Factors
|
||||
KeyVector js;
|
||||
|
@ -246,81 +244,19 @@ class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
|
|||
|
||||
// build augmented Hessian (with last row/column being the information vector)
|
||||
Matrix3 P;
|
||||
Cameras::ComputePointCovariance<3>(P, E, lambda, diagonalDamping);
|
||||
Cameras::ComputePointCovariance <3> (P, E, lambda, diagonalDamping);
|
||||
|
||||
// marginalize point: note - we reuse the standard SchurComplement function
|
||||
SymmetricBlockMatrix augmentedHessian =
|
||||
Cameras::SchurComplement<3, Dim>(Fs, E, P, b);
|
||||
|
||||
// now pack into an Hessian factor
|
||||
std::vector<DenseIndex> dims(nrUniqueKeys + 1); // this also includes the b term
|
||||
std::fill(dims.begin(), dims.end() - 1, 6);
|
||||
dims.back() = 1;
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys;
|
||||
|
||||
// here we have to deal with the fact that some cameras may share the same extrinsic key
|
||||
if (nrUniqueKeys == nrNonuniqueKeys) { // if there is 1 calibration key per camera
|
||||
augmentedHessianUniqueKeys = SymmetricBlockMatrix(
|
||||
dims, Matrix(augmentedHessian.selfadjointView()));
|
||||
} else { // if multiple cameras share a calibration we have to rearrange
|
||||
// the results of the Schur complement matrix
|
||||
std::vector<DenseIndex> nonuniqueDims(nrNonuniqueKeys + 1); // this also includes the b term
|
||||
std::fill(nonuniqueDims.begin(), nonuniqueDims.end() - 1, 6);
|
||||
nonuniqueDims.back() = 1;
|
||||
augmentedHessian = SymmetricBlockMatrix(
|
||||
nonuniqueDims, Matrix(augmentedHessian.selfadjointView()));
|
||||
|
||||
// these are the keys that correspond to the blocks in augmentedHessian (output of SchurComplement)
|
||||
KeyVector nonuniqueKeys;
|
||||
for (size_t i = 0; i < world_P_body_keys_.size(); i++) {
|
||||
nonuniqueKeys.push_back(world_P_body_keys_.at(i));
|
||||
nonuniqueKeys.push_back(body_P_cam_keys_.at(i));
|
||||
}
|
||||
|
||||
// get map from key to location in the new augmented Hessian matrix (the one including only unique keys)
|
||||
std::map<Key, size_t> keyToSlotMap;
|
||||
for (size_t k = 0; k < nrUniqueKeys; k++) {
|
||||
keyToSlotMap[keys_[k]] = k;
|
||||
}
|
||||
|
||||
// initialize matrix to zero
|
||||
augmentedHessianUniqueKeys = SymmetricBlockMatrix(
|
||||
dims, Matrix::Zero(6 * nrUniqueKeys + 1, 6 * nrUniqueKeys + 1));
|
||||
|
||||
// add contributions for each key: note this loops over the hessian with nonUnique keys (augmentedHessian)
|
||||
// and populates an Hessian that only includes the unique keys (that is what we want to return)
|
||||
for (size_t i = 0; i < nrNonuniqueKeys; i++) { // rows
|
||||
Key key_i = nonuniqueKeys.at(i);
|
||||
|
||||
// update information vector
|
||||
augmentedHessianUniqueKeys.updateOffDiagonalBlock(
|
||||
keyToSlotMap[key_i], nrUniqueKeys,
|
||||
augmentedHessian.aboveDiagonalBlock(i, nrNonuniqueKeys));
|
||||
|
||||
// update blocks
|
||||
for (size_t j = i; j < nrNonuniqueKeys; j++) { // cols
|
||||
Key key_j = nonuniqueKeys.at(j);
|
||||
if (i == j) {
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
keyToSlotMap[key_i], augmentedHessian.diagonalBlock(i));
|
||||
} else { // (i < j)
|
||||
if (keyToSlotMap[key_i] != keyToSlotMap[key_j]) {
|
||||
augmentedHessianUniqueKeys.updateOffDiagonalBlock(
|
||||
keyToSlotMap[key_i], keyToSlotMap[key_j],
|
||||
augmentedHessian.aboveDiagonalBlock(i, j));
|
||||
} else {
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
keyToSlotMap[key_i],
|
||||
augmentedHessian.aboveDiagonalBlock(i, j)
|
||||
+ augmentedHessian.aboveDiagonalBlock(i, j).transpose());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// update bottom right element of the matrix
|
||||
augmentedHessianUniqueKeys.updateDiagonalBlock(
|
||||
nrUniqueKeys, augmentedHessian.diagonalBlock(nrNonuniqueKeys));
|
||||
// these are the keys that correspond to the blocks in augmentedHessian (output of SchurComplement)
|
||||
KeyVector nonuniqueKeys;
|
||||
for (size_t i = 0; i < world_P_body_keys_.size(); i++) {
|
||||
nonuniqueKeys.push_back(world_P_body_keys_.at(i));
|
||||
nonuniqueKeys.push_back(body_P_cam_keys_.at(i));
|
||||
}
|
||||
// but we need to get the augmented Hessian wrt the unique keys in keys_
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys =
|
||||
Cameras::SchurComplementAndRearrangeBlocks<3,DimBlock,DimPose>(Fs,E,P,b,
|
||||
nonuniqueKeys, keys_);
|
||||
|
||||
return boost::make_shared < RegularHessianFactor<DimPose>
|
||||
> (keys_, augmentedHessianUniqueKeys);
|
||||
}
|
||||
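For intuition, a small standalone sketch of the bookkeeping that the removed code above did by hand and that SchurComplementAndRearrangeBlocks now encapsulates: map each (possibly repeated) block key to the slot of its unique key, so per-measurement Schur-complement blocks can be accumulated into a Hessian indexed by unique keys only. The function name is illustrative.

#include <map>
#include <gtsam/inference/Key.h>

// A nonunique key key_i contributes to row/column slot[key_i] of the
// rearranged Hessian.
std::map<gtsam::Key, size_t> keyToSlot(const gtsam::KeyVector& uniqueKeys) {
  std::map<gtsam::Key, size_t> slot;
  for (size_t k = 0; k < uniqueKeys.size(); k++) slot[uniqueKeys[k]] = k;
  return slot;
}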
@ -0,0 +1,407 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testProjectionFactorRollingShutter.cpp
|
||||
* @brief Unit tests for ProjectionFactorRollingShutter Class
|
||||
* @author Luca Carlone
|
||||
* @date July 2021
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Cal3DS2.h>
|
||||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam_unstable/slam/ProjectionFactorRollingShutter.h>
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// make a realistic calibration matrix
|
||||
static double fov = 60; // degrees
|
||||
static size_t w = 640, h = 480;
|
||||
static Cal3_S2::shared_ptr K(new Cal3_S2(fov, w, h));
|
||||
|
||||
// Create a noise model for the pixel error
|
||||
static SharedNoiseModel model(noiseModel::Unit::Create(2));
|
||||
|
||||
// Convenience for named keys
|
||||
using symbol_shorthand::L;
|
||||
using symbol_shorthand::T;
|
||||
using symbol_shorthand::X;
|
||||
|
||||
// Convenience to define common variables across many tests
|
||||
static Key poseKey1(X(1));
|
||||
static Key poseKey2(X(2));
|
||||
static Key pointKey(L(1));
|
||||
static double interp_params = 0.5;
|
||||
static Point2 measurement(323.0, 240.0);
|
||||
static Pose3 body_P_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2),
|
||||
Point3(0.25, -0.10, 1.0));
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, Constructor) {
|
||||
ProjectionFactorRollingShutter factor(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, ConstructorWithTransform) {
|
||||
ProjectionFactorRollingShutter factor(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K,
|
||||
body_P_sensor);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, Equals) {
|
||||
{ // factors are equal
|
||||
ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K);
|
||||
ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K);
|
||||
CHECK(assert_equal(factor1, factor2));
|
||||
}
|
||||
{ // factors are NOT equal (keys are different)
|
||||
ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K);
|
||||
ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
|
||||
poseKey1, poseKey1, pointKey, K);
|
||||
CHECK(!assert_equal(factor1, factor2)); // not equal
|
||||
}
|
||||
{ // factors are NOT equal (different interpolation)
|
||||
ProjectionFactorRollingShutter factor1(measurement, 0.1, model, poseKey1,
|
||||
poseKey1, pointKey, K);
|
||||
ProjectionFactorRollingShutter factor2(measurement, 0.5, model, poseKey1,
|
||||
poseKey2, pointKey, K);
|
||||
CHECK(!assert_equal(factor1, factor2)); // not equal
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, EqualsWithTransform) {
|
||||
{ // factors are equal
|
||||
ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K,
|
||||
body_P_sensor);
|
||||
ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K,
|
||||
body_P_sensor);
|
||||
CHECK(assert_equal(factor1, factor2));
|
||||
}
|
||||
{ // factors are NOT equal
|
||||
ProjectionFactorRollingShutter factor1(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K,
|
||||
body_P_sensor);
|
||||
Pose3 body_P_sensor2(
|
||||
Rot3::RzRyRx(0.0, 0.0, 0.0),
|
||||
Point3(0.25, -0.10, 1.0)); // rotation different from body_P_sensor
|
||||
ProjectionFactorRollingShutter factor2(measurement, interp_params, model,
|
||||
poseKey1, poseKey2, pointKey, K,
|
||||
body_P_sensor2);
|
||||
CHECK(!assert_equal(factor1, factor2));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, Error) {
|
||||
{
|
||||
// Create the factor with a measurement that is 3 pixels off in x
|
||||
// Camera pose corresponds to the first camera
|
||||
double t = 0.0;
|
||||
ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1,
|
||||
poseKey2, pointKey, K);
|
||||
|
||||
// Set the linearization point
|
||||
Pose3 pose1(Rot3(), Point3(0, 0, -6));
|
||||
Pose3 pose2(Rot3(), Point3(0, 0, -4));
|
||||
Point3 point(0.0, 0.0, 0.0);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(pose1, pose2, point));
|
||||
|
||||
// The expected error is (-3.0, 0.0) pixels / UnitCovariance
|
||||
Vector expectedError = Vector2(-3.0, 0.0);
|
||||
|
||||
// Verify we get the expected error
|
||||
CHECK(assert_equal(expectedError, actualError, 1e-9));
|
||||
}
|
||||
{
|
||||
// Create the factor with a measurement that is 3 pixels off in x
|
||||
// Camera pose is actually interpolated now
|
||||
double t = 0.5;
|
||||
ProjectionFactorRollingShutter factor(measurement, t, model, poseKey1,
|
||||
poseKey2, pointKey, K);
|
||||
|
||||
// Set the linearization point
|
||||
Pose3 pose1(Rot3(), Point3(0, 0, -8));
|
||||
Pose3 pose2(Rot3(), Point3(0, 0, -4));
|
||||
Point3 point(0.0, 0.0, 0.0);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(pose1, pose2, point));
|
||||
|
||||
// The expected error is (-3.0, 0.0) pixels / UnitCovariance
|
||||
Vector expectedError = Vector2(-3.0, 0.0);
|
||||
|
||||
// Verify we get the expected error
|
||||
CHECK(assert_equal(expectedError, actualError, 1e-9));
|
||||
}
|
||||
{
|
||||
// Create measurement by projecting 3D landmark
|
||||
double t = 0.3;
|
||||
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
|
||||
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
|
||||
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
|
||||
PinholeCamera<Cal3_S2> camera(poseInterp, *K);
|
||||
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
|
||||
Point2 measured = camera.project(point);
|
||||
|
||||
// create factor
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
|
||||
poseKey2, pointKey, K);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(pose1, pose2, point));
|
||||
|
||||
// The expected error is zero
|
||||
Vector expectedError = Vector2(0.0, 0.0);
|
||||
|
||||
// Verify we get the expected error
|
||||
CHECK(assert_equal(expectedError, actualError, 1e-9));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, ErrorWithTransform) {
|
||||
// Create measurement by projecting 3D landmark
|
||||
double t = 0.3;
|
||||
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
|
||||
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
|
||||
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
|
||||
Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0.2, 0.1));
|
||||
PinholeCamera<Cal3_S2> camera(poseInterp * body_P_sensor3, *K);
|
||||
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
|
||||
Point2 measured = camera.project(point);
|
||||
|
||||
// create factor
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
|
||||
pointKey, K, body_P_sensor3);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(pose1, pose2, point));
|
||||
|
||||
// The expected error is zero
|
||||
Vector expectedError = Vector2(0.0, 0.0);
|
||||
|
||||
// Verify we get the expected error
|
||||
CHECK(assert_equal(expectedError, actualError, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, Jacobian) {
|
||||
// Create measurement by projecting 3D landmark
|
||||
double t = 0.3;
|
||||
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
|
||||
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
|
||||
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
|
||||
PinholeCamera<Cal3_S2> camera(poseInterp, *K);
|
||||
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
|
||||
Point2 measured = camera.project(point);
|
||||
|
||||
// create factor
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
|
||||
pointKey, K);
|
||||
|
||||
// Use the factor to calculate the Jacobians
|
||||
Matrix H1Actual, H2Actual, H3Actual;
|
||||
factor.evaluateError(pose1, pose2, point, H1Actual, H2Actual, H3Actual);
|
||||
|
||||
// Expected Jacobians via numerical derivatives
|
||||
Matrix H1Expected = numericalDerivative31<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
|
||||
CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
|
||||
CHECK(assert_equal(H3Expected, H3Actual, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, JacobianWithTransform) {
|
||||
// Create measurement by projecting 3D landmark
|
||||
double t = 0.6;
|
||||
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
|
||||
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
|
||||
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
|
||||
Pose3 body_P_sensor3(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0.2, 0.1));
|
||||
PinholeCamera<Cal3_S2> camera(poseInterp * body_P_sensor3, *K);
|
||||
Point3 point(0.0, 0.0, 5.0); // 5 meters in front of the camera
|
||||
Point2 measured = camera.project(point);
|
||||
|
||||
// create factor
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1, poseKey2,
|
||||
pointKey, K, body_P_sensor3);
|
||||
|
||||
// Use the factor to calculate the Jacobians
|
||||
Matrix H1Actual, H2Actual, H3Actual;
|
||||
factor.evaluateError(pose1, pose2, point, H1Actual, H2Actual, H3Actual);
|
||||
|
||||
// Expected Jacobians via numerical derivatives
|
||||
Matrix H1Expected = numericalDerivative31<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
|
||||
CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
|
||||
CHECK(assert_equal(H3Expected, H3Actual, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ProjectionFactorRollingShutter, cheirality) {
|
||||
// Create measurement by projecting 3D landmark behind camera
|
||||
double t = 0.3;
|
||||
Pose3 pose1(Rot3::RzRyRx(0.1, 0.0, 0.1), Point3(0, 0, 0));
|
||||
Pose3 pose2(Rot3::RzRyRx(-0.1, -0.1, 0.0), Point3(0, 0, 1));
|
||||
Pose3 poseInterp = interpolate<Pose3>(pose1, pose2, t);
|
||||
PinholeCamera<Cal3_S2> camera(poseInterp, *K);
|
||||
Point3 point(0.0, 0.0, -5.0); // 5 meters behind the camera
|
||||
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
Point2 measured = Point2(0.0, 0.0); // project would throw an exception
|
||||
{ // check that exception is thrown if we set throwCheirality = true
|
||||
bool throwCheirality = true;
|
||||
bool verboseCheirality = true;
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
|
||||
poseKey2, pointKey, K,
|
||||
throwCheirality, verboseCheirality);
|
||||
CHECK_EXCEPTION(factor.evaluateError(pose1, pose2, point),
|
||||
CheiralityException);
|
||||
}
|
||||
{ // check that exception is NOT thrown if we set throwCheirality = false,
|
||||
// and outputs are correct
|
||||
bool throwCheirality = false; // default
|
||||
bool verboseCheirality = false; // default
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
|
||||
poseKey2, pointKey, K,
|
||||
throwCheirality, verboseCheirality);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Matrix H1Actual, H2Actual, H3Actual;
|
||||
Vector actualError(factor.evaluateError(pose1, pose2, point, H1Actual,
|
||||
H2Actual, H3Actual));
|
||||
|
||||
// The expected error is the fallback value returned when the point is behind the camera
|
||||
Vector expectedError = Vector2::Constant(
|
||||
2.0 * K->fx()); // this is what we return when point is behind camera
|
||||
|
||||
// Verify we get the expected error
|
||||
CHECK(assert_equal(expectedError, actualError, 1e-9));
|
||||
CHECK(assert_equal(Matrix::Zero(2, 6), H1Actual, 1e-5));
|
||||
CHECK(assert_equal(Matrix::Zero(2, 6), H2Actual, 1e-5));
|
||||
CHECK(assert_equal(Matrix::Zero(2, 3), H3Actual, 1e-5));
|
||||
}
|
||||
#else
|
||||
{
|
||||
// everything is well defined, hence this matches the test "Jacobian" above:
|
||||
Point2 measured = camera.project(point);
|
||||
|
||||
// create factor
|
||||
ProjectionFactorRollingShutter factor(measured, t, model, poseKey1,
|
||||
poseKey2, pointKey, K);
|
||||
|
||||
// Use the factor to calculate the Jacobians
|
||||
Matrix H1Actual, H2Actual, H3Actual;
|
||||
factor.evaluateError(pose1, pose2, point, H1Actual, H2Actual, H3Actual);
|
||||
|
||||
// Expected Jacobians via numerical derivatives
|
||||
Matrix H1Expected = numericalDerivative31<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H2Expected = numericalDerivative32<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
Matrix H3Expected = numericalDerivative33<Vector, Pose3, Pose3, Point3>(
|
||||
std::function<Vector(const Pose3&, const Pose3&, const Point3&)>(
|
||||
std::bind(&ProjectionFactorRollingShutter::evaluateError, &factor,
|
||||
std::placeholders::_1, std::placeholders::_2,
|
||||
std::placeholders::_3, boost::none, boost::none,
|
||||
boost::none)),
|
||||
pose1, pose2, point);
|
||||
|
||||
CHECK(assert_equal(H1Expected, H1Actual, 1e-5));
|
||||
CHECK(assert_equal(H2Expected, H2Actual, 1e-5));
|
||||
CHECK(assert_equal(H3Expected, H3Actual, 1e-5));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
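The test above validates the analytic Jacobians of the rolling-shutter projection factor against numerical derivatives and checks the cheirality behaviour when the landmark sits behind the camera. For readers working from Python, the sketch below illustrates the same zero-error-at-ground-truth idea; it uses the standard GenericProjectionFactorCal3_S2 (the rolling-shutter factor lives in gtsam_unstable and may not be exposed in every Python build), and all numeric values are arbitrary.

import gtsam
import numpy as np
from gtsam.symbol_shorthand import L, X

# Arbitrary calibration and ground-truth geometry.
K = gtsam.Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0)
pose = gtsam.Pose3()                   # camera at the origin, looking down +z
point = gtsam.Point3(0.1, -0.2, 5.0)   # landmark 5 m in front of the camera

# Simulate a perfect measurement by projecting the landmark.
camera = gtsam.PinholeCameraCal3_S2(pose, K)
measured = camera.project(point)

# Build a projection factor and evaluate it at the ground truth:
# with a perfect measurement the error is (numerically) zero.
noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)
factor = gtsam.GenericProjectionFactorCal3_S2(measured, noise, X(0), L(0), K)

values = gtsam.Values()
values.insert(X(0), pose)
values.insert(L(0), point)
print("error at ground truth:", factor.error(values))   # ~0.0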
File diff suppressed because it is too large
@@ -43,6 +43,7 @@ set(ignore
    gtsam::BetweenFactorPose2s
    gtsam::BetweenFactorPose3s
    gtsam::Point2Vector
    gtsam::Point2Pairs
    gtsam::Point3Pairs
    gtsam::Pose3Pairs
    gtsam::Pose3Vector

@@ -61,10 +62,13 @@ set(interface_headers
    ${PROJECT_SOURCE_DIR}/gtsam/slam/slam.i
    ${PROJECT_SOURCE_DIR}/gtsam/sfm/sfm.i
    ${PROJECT_SOURCE_DIR}/gtsam/navigation/navigation.i
    ${PROJECT_SOURCE_DIR}/gtsam/basis/basis.i
    )

set(GTSAM_PYTHON_TARGET gtsam_py)
set(GTSAM_PYTHON_UNSTABLE_TARGET gtsam_unstable_py)

pybind_wrap(gtsam_py # target
pybind_wrap(${GTSAM_PYTHON_TARGET} # target
        "${interface_headers}" # interface_headers
        "gtsam.cpp" # generated_cpp
        "gtsam" # module_name

@@ -76,7 +80,7 @@ pybind_wrap(gtsam_py # target
        ON # use_boost
)

set_target_properties(gtsam_py PROPERTIES
set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES
        INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
        INSTALL_RPATH_USE_LINK_PATH TRUE
        OUTPUT_NAME "gtsam"

@@ -96,7 +100,7 @@ create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")

# Add gtsam as a dependency to the install target
set(GTSAM_PYTHON_DEPENDENCIES gtsam_py)
set(GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_TARGET})


if(GTSAM_UNSTABLE_BUILD_PYTHON)

@@ -120,7 +124,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
    gtsam::CameraSetCal3Fisheye
    gtsam::KeyPairDoubleMap)

  pybind_wrap(gtsam_unstable_py # target
  pybind_wrap(${GTSAM_PYTHON_UNSTABLE_TARGET} # target
        ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header
        "gtsam_unstable.cpp" # generated_cpp
        "gtsam_unstable" # module_name

@@ -132,7 +136,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
        ON # use_boost
  )

  set_target_properties(gtsam_unstable_py PROPERTIES
  set_target_properties(${GTSAM_PYTHON_UNSTABLE_TARGET} PROPERTIES
        INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib"
        INSTALL_RPATH_USE_LINK_PATH TRUE
        OUTPUT_NAME "gtsam_unstable"

@@ -148,7 +152,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
    "${GTSAM_UNSTABLE_MODULE_PATH}")

  # Add gtsam_unstable to the install target
  list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py)
  list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})

endif()


@@ -165,6 +169,6 @@ add_custom_target(
  COMMAND
    ${CMAKE_COMMAND} -E env # add package to python path so no need to install
    "PYTHONPATH=${GTSAM_PYTHON_BUILD_DIRECTORY}/$ENV{PYTHONPATH}"
    ${PYTHON_EXECUTABLE} -m unittest discover
    ${PYTHON_EXECUTABLE} -m unittest discover -v -s .
  DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
  WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/tests)
  WORKING_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam/tests")
@@ -1,6 +1,12 @@
from . import utils
from .gtsam import *
from .utils import findExampleDataFile
"""Module definition file for GTSAM"""

# pylint: disable=import-outside-toplevel, global-variable-not-assigned, possibly-unused-variable, import-error, import-self

import sys

from gtsam import gtsam, utils
from gtsam.gtsam import *
from gtsam.utils import findExampleDataFile


def _init():

@@ -13,7 +19,7 @@ def _init():
    def Point2(x=np.nan, y=np.nan):
        """Shim for the deleted Point2 type."""
        if isinstance(x, np.ndarray):
            assert x.shape == (2,), "Point2 takes 2-vector"
            assert x.shape == (2, ), "Point2 takes 2-vector"
            return x  # "copy constructor"
        return np.array([x, y], dtype=float)

@@ -22,7 +28,7 @@ def _init():
    def Point3(x=np.nan, y=np.nan, z=np.nan):
        """Shim for the deleted Point3 type."""
        if isinstance(x, np.ndarray):
            assert x.shape == (3,), "Point3 takes 3-vector"
            assert x.shape == (3, ), "Point3 takes 3-vector"
            return x  # "copy constructor"
        return np.array([x, y, z], dtype=float)
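With this change, Point2 and Point3 are thin shims over NumPy arrays rather than wrapped C++ types. A minimal sketch of what that means in practice (the coordinate values are arbitrary):

import numpy as np
import gtsam

# Point3 is now a shim: it returns a plain numpy 3-vector.
p = gtsam.Point3(1.0, 2.0, 3.0)
assert isinstance(p, np.ndarray) and p.shape == (3, )

# Passing an existing 3-vector acts as a "copy constructor": the array is returned as-is.
q = gtsam.Point3(np.array([4.0, 5.0, 6.0]))
assert q.shape == (3, )

# Point2 behaves the same way with 2-vectors.
uv = gtsam.Point2(320.0, 240.0)
assert uv.shape == (2, )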
@@ -10,28 +10,30 @@ A script validating and demonstrating the ImuFactor inference.
Author: Frank Dellaert, Varun Agrawal
"""

# pylint: disable=no-name-in-module,unused-import,arguments-differ

from __future__ import print_function

import argparse
import math

import gtsam
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

import gtsam
from gtsam.symbol_shorthand import B, V, X
from gtsam.utils.plot import plot_pose3
from mpl_toolkits.mplot3d import Axes3D

from PreintegrationExample import POSES_FIG, PreintegrationExample

BIAS_KEY = B(0)


np.set_printoptions(precision=3, suppress=True)


class ImuFactorExample(PreintegrationExample):

    """Class to run example of the Imu Factor."""
    def __init__(self, twist_scenario="sick_twist"):
        self.velocity = np.array([2, 0, 0])
        self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)

@@ -42,9 +44,8 @@ class ImuFactorExample(PreintegrationExample):
            zero_twist=(np.zeros(3), np.zeros(3)),
            forward_twist=(np.zeros(3), self.velocity),
            loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity),
            sick_twist=(np.array([math.radians(30), -math.radians(30), 0]),
                        self.velocity)
        )
            sick_twist=(np.array([math.radians(30), -math.radians(30),
                                  0]), self.velocity))

        accBias = np.array([-0.3, 0.1, 0.2])
        gyroBias = np.array([0.1, 0.3, -0.1])

@@ -55,19 +56,44 @@ class ImuFactorExample(PreintegrationExample):
                                            bias, dt)

    def addPrior(self, i, graph):
        """Add priors at time step `i`."""
        state = self.scenario.navState(i)
        graph.push_back(gtsam.PriorFactorPose3(
            X(i), state.pose(), self.priorNoise))
        graph.push_back(gtsam.PriorFactorVector(
            V(i), state.velocity(), self.velNoise))
        graph.push_back(
            gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
        graph.push_back(
            gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise))

    def optimize(self, graph, initial):
        """Optimize using Levenberg-Marquardt optimization."""
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()
        return result

    def plot(self, result):
        """Plot resulting poses."""
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plot_pose3(POSES_FIG + 1, pose_i, 1)
            i += 1
        plt.title("Estimated Trajectory")

        gtsam.utils.plot.set_axes_equal(POSES_FIG + 1)

        print("Bias Values", result.atConstantBias(BIAS_KEY))

        plt.ioff()
        plt.show()

    def run(self, T=12, compute_covariances=False, verbose=True):
        """Main runner."""
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
        pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)

        T = 12
        num_poses = T  # assumes 1 factor per second
        initial = gtsam.Values()
        initial.insert(BIAS_KEY, self.actualBias)

@@ -91,14 +117,13 @@ class ImuFactorExample(PreintegrationExample):
            if k % 10 == 0:
                self.plotImu(t, measuredOmega, measuredAcc)

            if (k+1) % int(1 / self.dt) == 0:
            if (k + 1) % int(1 / self.dt) == 0:
                # Plot every second
                self.plotGroundTruthPose(t, scale=1)
                plt.title("Ground Truth Trajectory")

                # create IMU factor every second
                factor = gtsam.ImuFactor(X(i), V(i),
                                         X(i + 1), V(i + 1),
                factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1),
                                         BIAS_KEY, pim)
                graph.push_back(factor)

@@ -108,34 +133,34 @@ class ImuFactorExample(PreintegrationExample):

                pim.resetIntegration()

                rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1)
                translationNoise = gtsam.Point3(*np.random.randn(3)*1)
                rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3) * 0.1)
                translationNoise = gtsam.Point3(*np.random.randn(3) * 1)
                poseNoise = gtsam.Pose3(rotationNoise, translationNoise)

                actual_state_i = self.scenario.navState(t + self.dt)
                print("Actual state at {0}:\n{1}".format(
                    t+self.dt, actual_state_i))
                    t + self.dt, actual_state_i))

                noisy_state_i = gtsam.NavState(
                    actual_state_i.pose().compose(poseNoise),
                    actual_state_i.velocity() + np.random.randn(3)*0.1)
                    actual_state_i.velocity() + np.random.randn(3) * 0.1)

                initial.insert(X(i+1), noisy_state_i.pose())
                initial.insert(V(i+1), noisy_state_i.velocity())
                initial.insert(X(i + 1), noisy_state_i.pose())
                initial.insert(V(i + 1), noisy_state_i.velocity())
                i += 1

        # add priors on end
        self.addPrior(num_poses - 1, graph)

        initial.print_("Initial values:")
        initial.print("Initial values:")

        # optimize using Levenberg-Marquardt optimization
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = optimizer.optimize()
        result = self.optimize(graph, initial)

        result.print_("Optimized values:")
        result.print("Optimized values:")
        print("------------------")
        print(graph.error(initial))
        print(graph.error(result))
        print("------------------")

        if compute_covariances:
            # Calculate and print marginal covariances

@@ -148,33 +173,26 @@ class ImuFactorExample(PreintegrationExample):
            print("Covariance on vel {}:\n{}\n".format(
                i, marginals.marginalCovariance(V(i))))

        # Plot resulting poses
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plot_pose3(POSES_FIG+1, pose_i, 1)
            i += 1
        plt.title("Estimated Trajectory")

        gtsam.utils.plot.set_axes_equal(POSES_FIG+1)

        print("Bias Values", result.atConstantBias(BIAS_KEY))

        plt.ioff()
        plt.show()
        self.plot(result)


if __name__ == '__main__':
    parser = argparse.ArgumentParser("ImuFactorExample.py")
    parser.add_argument("--twist_scenario",
                        default="sick_twist",
                        choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist"))
    parser.add_argument("--time", "-T", default=12,
                        type=int, help="Total time in seconds")
                        choices=("zero_twist", "forward_twist", "loop_twist",
                                 "sick_twist"))
    parser.add_argument("--time",
                        "-T",
                        default=12,
                        type=int,
                        help="Total time in seconds")
    parser.add_argument("--compute_covariances",
                        default=False, action='store_true')
                        default=False,
                        action='store_true')
    parser.add_argument("--verbose", default=False, action='store_true')
    args = parser.parse_args()

    ImuFactorExample(args.twist_scenario).run(
        args.time, args.compute_covariances, args.verbose)
    ImuFactorExample(args.twist_scenario).run(args.time,
                                              args.compute_covariances,
                                              args.verbose)
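The refactor above moves the Levenberg-Marquardt setup into an optimize() helper. As a standalone sketch of that same pattern on a toy problem (a single Pose3 constrained by a prior; all numbers arbitrary):

import gtsam
from gtsam.symbol_shorthand import X

# Toy graph: one pose variable constrained by a prior at the origin.
graph = gtsam.NonlinearFactorGraph()
prior_noise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
graph.push_back(gtsam.PriorFactorPose3(X(0), gtsam.Pose3(), prior_noise))

# Start from a deliberately perturbed initial guess.
initial = gtsam.Values()
initial.insert(X(0), gtsam.Pose3(gtsam.Rot3.RzRyRx(0.1, 0.0, 0.0),
                                 gtsam.Point3(0.5, 0.0, 0.0)))

# Same Levenberg-Marquardt setup as ImuFactorExample.optimize().
params = gtsam.LevenbergMarquardtParams()
params.setVerbosityLM("SUMMARY")
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()

print("error before:", graph.error(initial))
print("error after: ", graph.error(result))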
@@ -5,36 +5,39 @@ A structure-from-motion example with landmarks
"""
# pylint: disable=invalid-name, E1101

from typing import List

import numpy as np

import gtsam
from gtsam import Cal3_S2, Point3, Pose3


def createPoints():
def createPoints() -> List[Point3]:
    # Create the set of ground-truth landmarks
    points = [gtsam.Point3(10.0, 10.0, 10.0),
              gtsam.Point3(-10.0, 10.0, 10.0),
              gtsam.Point3(-10.0, -10.0, 10.0),
              gtsam.Point3(10.0, -10.0, 10.0),
              gtsam.Point3(10.0, 10.0, -10.0),
              gtsam.Point3(-10.0, 10.0, -10.0),
              gtsam.Point3(-10.0, -10.0, -10.0),
              gtsam.Point3(10.0, -10.0, -10.0)]
    points = [
        Point3(10.0, 10.0, 10.0),
        Point3(-10.0, 10.0, 10.0),
        Point3(-10.0, -10.0, 10.0),
        Point3(10.0, -10.0, 10.0),
        Point3(10.0, 10.0, -10.0),
        Point3(-10.0, 10.0, -10.0),
        Point3(-10.0, -10.0, -10.0),
        Point3(10.0, -10.0, -10.0),
    ]
    return points


def createPoses(K):
    # Create the set of ground-truth poses
def createPoses(K: Cal3_S2) -> List[Pose3]:
    """Generate a set of ground-truth camera poses arranged in a circle about the origin."""
    radius = 40.0
    height = 10.0
    angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
    angles = np.linspace(0, 2 * np.pi, 8, endpoint=False)
    up = gtsam.Point3(0, 0, 1)
    target = gtsam.Point3(0, 0, 0)
    poses = []
    for theta in angles:
        position = gtsam.Point3(radius*np.cos(theta),
                                radius*np.sin(theta),
                                height)
        position = gtsam.Point3(radius * np.cos(theta), radius * np.sin(theta), height)
        camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K)
        poses.append(camera.pose())
    return poses
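A brief usage sketch for the two helpers above; the calibration numbers are arbitrary, and importing them as SFMdata assumes this file keeps its usual name in python/gtsam/examples.

from gtsam import Cal3_S2

from SFMdata import createPoints, createPoses  # assumed module/file name

# Arbitrary pinhole calibration: fx, fy, skew, cx, cy.
K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

points = createPoints()  # 8 ground-truth landmarks on the corners of a cube
poses = createPoses(K)   # 8 cameras on a circle, all looking at the origin

print(len(points), "landmarks,", len(poses), "poses")
print("first camera pose:\n", poses[0])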
@@ -1,8 +1,8 @@
/**
 * @file gtsam.cpp
 * @file {module_name}.cpp
 * @brief The auto-generated wrapper C++ source code.
 * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar
 * @date Aug. 18, 2020
 * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar, Varun Agrawal
 * @date Aug. 18, 2020
 *
 * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! **
 */
@@ -0,0 +1,14 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `PYBIND11_MAKE_OPAQUE` will mark the type as "opaque" for the pybind11
 * automatic STL binding, such that the raw objects can be accessed in Python.
 * Without this they will be automatically converted to a Python object, and all
 * mutations on Python side will not be reflected on C++.
 */

#include <pybind11/stl.h>
@@ -10,9 +10,18 @@
 * Without this they will be automatically converted to a Python object, and all
 * mutations on Python side will not be reflected on C++.
 */
#include <pybind11/stl.h>

// Support for binding boost::optional types in C++11.
// https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
namespace pybind11 { namespace detail {
template <typename T>
struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};
}}

PYBIND11_MAKE_OPAQUE(
    std::vector<gtsam::Point2, Eigen::aligned_allocator<gtsam::Point2>>);
PYBIND11_MAKE_OPAQUE(gtsam::Point2Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Point3Pairs);
PYBIND11_MAKE_OPAQUE(gtsam::Pose3Pairs);
PYBIND11_MAKE_OPAQUE(std::vector<gtsam::Pose3>);
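For intuition, a sketch of what these opaque declarations buy on the Python side. It assumes the opaque std::vector<gtsam::Point2> is exposed with a list-like binding named gtsam.Point2Vector, consistent with the ignore list in python/CMakeLists.txt; treat the exact type name and interface as assumptions rather than a documented API.

import numpy as np
import gtsam

# Assumption: std::vector<gtsam::Point2> is bound opaquely as gtsam.Point2Vector.
# Because the type is opaque, Python holds the actual C++ vector rather than a
# converted copy, so append() mutates the underlying C++ container in place.
measurements = gtsam.Point2Vector()
measurements.append(np.array([10.0, 20.0]))   # a Point2 is a numpy 2-vector
measurements.append(np.array([30.0, 40.0]))

print(len(measurements))   # 2
print(measurements[0])     # [10. 20.]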
@@ -0,0 +1,12 @@
/* Please refer to:
 * https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
 * These are required to save one copy operation on Python calls.
 *
 * NOTES
 * =================
 *
 * `py::bind_vector` and similar machinery gives the std container a Python-like
 * interface, but without the `<pybind11/stl.h>` copying mechanism. Combined
 * with `PYBIND11_MAKE_OPAQUE` this allows the types to be modified with Python,
 * and saves one copy operation.
 */
Some files were not shown because too many files have changed in this diff.