Merge branch 'develop' into fix/combined-imu
commit 2d3859db01

@@ -75,7 +75,7 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-DGTSAM_UNSTABLE_BUILD_PYTHON=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=OFF \
-DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
@@ -83,6 +83,6 @@ cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
make -j2 install
cd $GITHUB_WORKSPACE/build/python
$PYTHON setup.py install --user --prefix=
$PYTHON -m pip install --user .
cd $GITHUB_WORKSPACE/python/gtsam/tests
$PYTHON -m unittest discover -v

@@ -64,13 +64,14 @@ function configure()
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V42=${GTSAM_ALLOW_DEPRECATED_SINCE_V42:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
-DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
-DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
-DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_SINGLE_TEST_EXE=ON \
-DBOOST_ROOT=$BOOST_ROOT \
-DBoost_NO_SYSTEM_PATHS=ON \
-DBoost_ARCHITECTURE=-x64
@@ -95,7 +96,11 @@ function build ()
configure
if [ "$(uname)" == "Linux" ]; then
if (($(nproc) > 2)); then
make -j$(nproc)
else
make -j2
fi
elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu)
fi
@@ -113,9 +118,13 @@ function test ()
# Actual testing
if [ "$(uname)" == "Linux" ]; then
make -j$(nproc)
if (($(nproc) > 2)); then
make -j$(nproc) check
else
make -j2 check
fi
elif [ "$(uname)" == "Darwin" ]; then
make -j$(sysctl -n hw.physicalcpu)
make -j$(sysctl -n hw.physicalcpu) check
fi
finish

@@ -15,7 +15,7 @@ jobs:
BOOST_VERSION: 1.67.0
strategy:
fail-fast: false
fail-fast: true
matrix:
# Github Actions requires a single row to be added to the build matrix.
# See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
@@ -110,7 +110,7 @@ jobs:
- name: Set Allow Deprecated Flag
if: matrix.flag == 'deprecated'
run: |
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV
echo "GTSAM_ALLOW_DEPRECATED_SINCE_V42=ON" >> $GITHUB_ENV
echo "Allow deprecated since version 4.1"
- name: Set Use Quaternions Flag

@@ -26,7 +26,11 @@ jobs:
windows-2019-cl,
]
build_type: [Debug, Release]
build_type: [
Debug,
#TODO(Varun) The release build takes over 2.5 hours, need to figure out why.
# Release
]
build_unstable: [ON]
include:
#TODO This build fails, need to understand why.
@@ -44,7 +48,9 @@ jobs:
- name: Install Dependencies
shell: powershell
run: |
Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
iwr -useb get.scoop.sh -outfile 'install_scoop.ps1'
.\install_scoop.ps1 -RunAsAdmin
scoop install cmake --global # So we don't get issues with CMP0074 policy
scoop install ninja --global
@@ -90,13 +96,33 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Build
- name: Configuration
run: |
cmake -E remove_directory build
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
cmake --build build --config ${{ matrix.build_type }} --target gtsam
cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build --config ${{ matrix.build_type }} --target wrap
cmake --build build --config ${{ matrix.build_type }} --target check.base
cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
cmake --build build --config ${{ matrix.build_type }} --target check.linear
- name: Build
run: |
# Since Visual Studio is a multi-generator, we need to use --config
# https://stackoverflow.com/a/24470998/1236990
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam_unstable
cmake --build build -j 4 --config ${{ matrix.build_type }} --target wrap
# Run GTSAM tests
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.basis
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.discrete
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.geometry
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.inference
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.linear
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.navigation
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.nonlinear
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sfm
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.slam
cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.symbolic
# Run GTSAM_UNSTABLE tests
#cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable

@@ -3,6 +3,7 @@
.idea
*.pyc
*.DS_Store
*.swp
/examples/Data/dubrovnik-3-7-pre-rewritten.txt
/examples/Data/pose2example-rewritten.txt
/examples/Data/pose3example-rewritten.txt
@@ -16,3 +17,4 @@
# for QtCreator:
CMakeLists.txt.user*
xcode/
/Dockerfile

@@ -1,4 +1,3 @@
project(GTSAM CXX C)
cmake_minimum_required(VERSION 3.0)
# new feature to Cmake Version > 2.8.12
@@ -9,12 +8,23 @@ endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 1)
set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a6")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING})
if (${GTSAM_VERSION_PATCH} EQUAL 0)
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
else()
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
endif()
project(GTSAM
LANGUAGES CXX C
VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")
set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
@@ -87,6 +97,13 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
CACHE STRING "The Python version to use for wrapping")
# Set the include directory for matlab.h
set(GTWRAP_INCLUDE_NAME "wrap")
# Copy matlab.h to the correct folder.
configure_file(${PROJECT_SOURCE_DIR}/wrap/matlab.h
${PROJECT_BINARY_DIR}/wrap/matlab.h COPYONLY)
# Add the include directories so that matlab.h can be found
include_directories("${PROJECT_BINARY_DIR}" "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}")
add_subdirectory(wrap)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
endif()
@@ -105,6 +122,11 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)
if (GTSAM_BUILD_UNSTABLE)
GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake)
endif()
# Check for doxygen availability - optional dependency
find_package(Doxygen)

@@ -15,7 +15,7 @@ For example:
```cpp
class GTSAM_EXPORT MyClass { ... };
GTSAM_EXPORT myFunction();
GTSAM_EXPORT return_type myFunction();
```
More details [here](Using-GTSAM-EXPORT.md).

@@ -13,7 +13,7 @@ $ make install
## Important Installation Notes
1. GTSAM requires the following libraries to be installed on your system:
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes).
- BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes) for version recommendations based on your compiler.
- Cmake version 3.0 or higher
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
@@ -72,7 +72,7 @@ execute commands as follows for an out-of-source build:
Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
This is particularly seen when using `clang` as the C++ compiler.
For this reason we require Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
For this reason we recommend Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
## Known Issues

@@ -2,9 +2,9 @@
**Important Note**
As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.
As of Dec 2021, the `develop` branch is officially in "Pre 4.2" mode. A great new feature we will be adding in 4.2 is *hybrid inference* a la DCSLAM (Kevin Doherty et al) and we envision several API-breaking changes will happen in the discrete folder.
However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`.
In addition, features deprecated in 4.1 will be removed. Please use the last [4.1.1 release](https://github.com/borglab/gtsam/releases/tag/4.1.1) if you need those features. However, most (not all, unfortunately) are easily converted and can be tracked down (in 4.1.1) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
## What is GTSAM?
@@ -57,7 +57,7 @@ GTSAM 4 introduces several new features, most notably Expressions and a Python t
GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4` to use the deprecated methods.
GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V41` for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42` for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
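
For reference, a minimal sketch of how client code can check this flag at compile time. It assumes the CMake option is mirrored as a same-named preprocessor define in `gtsam/config.h` (the usual GTSAM configuration pattern); names and messages here are illustrative only.

```cpp
#include <gtsam/config.h>  // assumed to define GTSAM_ALLOW_DEPRECATED_SINCE_V42 when the CMake flag is ON
#include <iostream>

int main() {
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
  // Code that still calls methods deprecated in the 4.1/4.2 cycle compiles here.
  std::cout << "Deprecated GTSAM APIs are still available.\n";
#else
  std::cout << "Deprecated GTSAM APIs are disabled; port to the replacement calls.\n";
#endif
  return 0;
}
```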

## Wrappers

@@ -8,6 +8,7 @@ To create a DLL in windows, the `GTSAM_EXPORT` keyword has been created and need
* At least one of the functions inside that class is declared in a .cpp file and not just the .h file.
* You can `GTSAM_EXPORT` any class it inherits from as well. (Note that this implicitly requires the class does not derive from a "header-only" class. Note that Eigen is a "header-only" library, so if your class derives from Eigen, _do not_ use `GTSAM_EXPORT` in the class definition!)
3. If you have defined a class using `GTSAM_EXPORT`, do not use `GTSAM_EXPORT` in any of its individual function declarations. (Note that you _can_ put `GTSAM_EXPORT` in the definition of individual functions within a class as long as you don't put `GTSAM_EXPORT` in the class definition.)
4. For template specializations, you need to add `GTSAM_EXPORT` to each individual specialization.
## When is GTSAM_EXPORT being used incorrectly
Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in `gtsam/base` will often show up when compiling the `check_base_program` or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
@@ -29,7 +30,7 @@ Rule #1 doesn't seem very bad, until you combine it with rule #2
***Compiler Rule #2*** Anything declared in a header file is not included in a DLL.
When these two rules are combined, you get some very confusing results. For example, a class which is completely defined in a header (e.g. LieMatrix) cannot use `GTSAM_EXPORT` in its definition. If LieMatrix is defined with `GTSAM_EXPORT`, then the compiler _must_ find LieMatrix in a DLL. Because LieMatrix is a header-only class, however, it can't find it, leading to a very confusing "I can't find this symbol" type of error. Note that the linker says it can't find the symbol even though the compiler found the header file that completely defines the class.
When these two rules are combined, you get some very confusing results. For example, a class which is completely defined in a header (e.g. Foo) cannot use `GTSAM_EXPORT` in its definition. If Foo is defined with `GTSAM_EXPORT`, then the compiler _must_ find Foo in a DLL. Because Foo is a header-only class, however, it can't find it, leading to a very confusing "I can't find this symbol" type of error. Note that the linker says it can't find the symbol even though the compiler found the header file that completely defines the class.
Also note that when a class that you want to export inherits from another class that is not exportable, this can cause significant issues. According to this [MSVC Warning page](https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-2-c4275?view=vs-2019), it may not strictly be a rule, but we have seen several linker errors when a class that is defined with `GTSAM_EXPORT` extended an Eigen class. In general, it appears that any inheritance of non-exportable class by an exportable class is a bad idea.
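
To make the rules above concrete, here is a small illustrative header. The class and file names are hypothetical, and it assumes the `GTSAM_EXPORT` macro is provided by `gtsam/dllexport.h` as in the GTSAM sources; this is a sketch, not code from the repository.

```cpp
// my_class.h -- hypothetical example illustrating the rules above
#pragma once
#include <gtsam/dllexport.h>  // assumed location of the GTSAM_EXPORT macro

namespace example {

// Exported class: at least one member (value) is defined in a .cpp file,
// and the class does not derive from a header-only (e.g. Eigen) type.
class GTSAM_EXPORT MyClass {
 public:
  double value() const;          // defined in my_class.cpp
  int id() const { return 42; }  // inline is fine; no GTSAM_EXPORT on individual members
};

// Header-only class: everything is defined inline, so it must NOT be
// declared with GTSAM_EXPORT (the linker would look for it in a DLL).
class HeaderOnlyHelper {
 public:
  double twice(double x) const { return 2.0 * x; }
};

}  // namespace example
```
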
@@ -93,6 +93,10 @@ if(MSVC)
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
)
add_compile_options(/wd4005)
add_compile_options(/wd4101)
add_compile_options(/wd4834)
endif()
# Other (non-preprocessor macros) compiler flags:
@@ -187,7 +191,7 @@ endif()
if (NOT MSVC)
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
if(GTSAM_BUILD_WITH_MARCH_NATIVE AND (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64"))
# Add as public flag so all dependant projects also use it, as required
# by Eigen to avid crashes due to SIMD vectorization:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
@@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
# here.
if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING})
endif()
# Version file
@@ -25,9 +25,10 @@ option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V42 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
@@ -21,7 +21,12 @@ if(GTSAM_USE_SYSTEM_METIS)
mark_as_advanced(METIS_LIBRARY)
add_library(metis-gtsam-if INTERFACE)
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR})
target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR}
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
# via extern "C"
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
)
target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
endif()
else()
@@ -30,10 +35,12 @@ else()
add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)
target_include_directories(metis-gtsam BEFORE PUBLIC
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
# gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
# via extern "C"
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
$<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
)
add_library(metis-gtsam-if INTERFACE)
@@ -86,7 +86,7 @@ print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as defaul
print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V42} "Allow features deprecated in GTSAM 4.1")
print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
@@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB)
if(TBB_FOUND)
set(GTSAM_USE_TBB 1) # This will go into config.h
if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
endif()
# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
# endif()
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)

@@ -1,7 +1,7 @@
// add unary measurement factors, like GPS, on all three poses
noiseModel::Diagonal::shared_ptr unaryNoise =
auto unaryNoise =
noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
graph.emplace_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise);
@@ -6,8 +6,7 @@ public:
NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}
Vector evaluateError(const Pose2& q,
boost::optional<Matrix&> H = boost::none) const
{
boost::optional<Matrix&> H = boost::none) const override {
const Rot2& R = q.rotation();
if (H) (*H) = (gtsam::Matrix(2, 3) <<
R.c(), -R.s(), 0.0,
@@ -3,13 +3,11 @@ NonlinearFactorGraph graph;
// Add a Gaussian prior on pose x_1
Pose2 priorMean(0.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr priorNoise =
noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, priorMean, priorNoise);
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
// Add two odometry factors
Pose2 odometry(2.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr odometryNoise =
noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));

@@ -1,11 +1,14 @@
Factor Graph:
size: 3
factor 0: PriorFactor on 1
Factor 0: PriorFactor on 1
prior mean: (0, 0, 0)
noise model: diagonal sigmas [0.3; 0.3; 0.1];
factor 1: BetweenFactor(1,2)
Factor 1: BetweenFactor(1,2)
measured: (2, 0, 0)
noise model: diagonal sigmas [0.2; 0.2; 0.1];
factor 2: BetweenFactor(2,3)
Factor 2: BetweenFactor(2,3)
measured: (2, 0, 0)
noise model: diagonal sigmas [0.2; 0.2; 0.1];
@@ -1,11 +1,23 @@
Initial Estimate:
Values with 3 values:
Value 1: (0.5, 0, 0.2)
Value 2: (2.3, 0.1, -0.2)
Value 3: (4.1, 0.1, 0.1)
Value 1: (gtsam::Pose2)
(0.5, 0, 0.2)
Value 2: (gtsam::Pose2)
(2.3, 0.1, -0.2)
Value 3: (gtsam::Pose2)
(4.1, 0.1, 0.1)
Final Result:
Values with 3 values:
Value 1: (-1.8e-16, 8.7e-18, -9.1e-19)
Value 2: (2, 7.4e-18, -2.5e-18)
Value 3: (4, -1.8e-18, -3.1e-18)
Value 1: (gtsam::Pose2)
(7.5e-16, -5.3e-16, -1.8e-16)
Value 2: (gtsam::Pose2)
(2, -1.1e-15, -2.5e-16)
Value 3: (gtsam::Pose2)
(4, -1.7e-15, -2.5e-16)
@@ -1,12 +1,12 @@
x1 covariance:
0.09 1.1e-47 5.7e-33
1.1e-47 0.09 1.9e-17
5.7e-33 1.9e-17 0.01
0.09 1.7e-33 2.8e-33
1.7e-33 0.09 2.6e-17
2.8e-33 2.6e-17 0.01
x2 covariance:
0.13 4.7e-18 2.4e-18
4.7e-18 0.17 0.02
2.4e-18 0.02 0.02
0.13 1.2e-18 6.1e-19
1.2e-18 0.17 0.02
6.1e-19 0.02 0.02
x3 covariance:
0.17 2.7e-17 8.4e-18
2.7e-17 0.37 0.06
8.4e-18 0.06 0.03
0.17 8.6e-18 2.7e-18
8.6e-18 0.37 0.06
2.7e-18 0.06 0.03

@@ -1188,7 +1188,7 @@ USE_MATHJAX = YES
# MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment.
MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.

doc/gtsam.lyx

@ -1,5 +1,5 @@
|
|||
#LyX 2.2 created this file. For more info see http://www.lyx.org/
|
||||
\lyxformat 508
|
||||
#LyX 2.3 created this file. For more info see http://www.lyx.org/
|
||||
\lyxformat 544
|
||||
\begin_document
|
||||
\begin_header
|
||||
\save_transient_properties true
|
||||
|
@ -62,6 +62,8 @@
|
|||
\font_osf false
|
||||
\font_sf_scale 100 100
|
||||
\font_tt_scale 100 100
|
||||
\use_microtype false
|
||||
\use_dash_ligatures true
|
||||
\graphics default
|
||||
\default_output_format default
|
||||
\output_sync 0
|
||||
|
@ -91,6 +93,7 @@
|
|||
\suppress_date false
|
||||
\justification true
|
||||
\use_refstyle 0
|
||||
\use_minted 0
|
||||
\index Index
|
||||
\shortcut idx
|
||||
\color #008000
|
||||
|
@ -105,7 +108,10 @@
|
|||
\tocdepth 3
|
||||
\paragraph_separation indent
|
||||
\paragraph_indentation default
|
||||
\quotes_language english
|
||||
\is_math_indent 0
|
||||
\math_numbering_side default
|
||||
\quotes_style english
|
||||
\dynamic_quotes 0
|
||||
\papercolumns 1
|
||||
\papersides 1
|
||||
\paperpagestyle default
|
||||
|
@ -128,14 +134,10 @@ A Hands-on Introduction
|
|||
|
||||
\begin_layout Author
|
||||
Frank Dellaert
|
||||
\begin_inset Newline newline
|
||||
\end_inset
|
||||
|
||||
Technical Report number GT-RIM-CP&R-2014-XXX
|
||||
\end_layout
|
||||
|
||||
\begin_layout Date
|
||||
September 2014
|
||||
Updated Last March 2022
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -148,18 +150,14 @@ filename "common_macros.tex"
|
|||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Section*
|
||||
Overview
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
In this document I provide a hands-on introduction to both factor graphs
|
||||
and GTSAM.
|
||||
This is an updated version from the 2012 TR that is tailored to our GTSAM
|
||||
3.0 library and beyond.
|
||||
4.0 library and beyond.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
|
||||
\series bold
|
||||
Factor graphs
|
||||
|
@ -168,6 +166,7 @@ Factor graphs
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Koller09book"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -199,7 +198,7 @@ ts or prior knowledge.
|
|||
robotics and vision.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
The GTSAM toolbox (GTSAM stands for
|
||||
\begin_inset Quotes eld
|
||||
\end_inset
|
||||
|
@ -214,11 +213,13 @@ Georgia Tech Smoothing and Mapping
|
|||
It provides state of the art solutions to the SLAM and SFM problems, but
|
||||
can also be used to model and solve both simpler and more complex estimation
|
||||
problems.
|
||||
It also provides a MATLAB interface which allows for rapid prototype developmen
|
||||
t, visualization, and user interaction.
|
||||
It also provides MATLAB and Python wrappers which allow for rapid prototype
|
||||
development, visualization, and user interaction.
|
||||
In addition, it is easy to use in Jupyter notebooks and/or Google's coLaborator
|
||||
y.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_layout Abstract
|
||||
GTSAM exploits sparsity to be computationally efficient.
|
||||
Typically measurements only provide information on the relationship between
|
||||
a handful of variables, and hence the resulting factor graph will be sparsely
|
||||
|
@ -229,14 +230,17 @@ l complexity.
|
|||
GTSAM provides iterative methods that are quite efficient regardless.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
You can download the latest version of GTSAM at
|
||||
\begin_layout Abstract
|
||||
You can download the latest version of GTSAM from GitHub at
|
||||
\end_layout
|
||||
|
||||
\begin_layout Abstract
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
http://tinyurl.com/gtsam
|
||||
https://github.com/borglab/gtsam
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
@ -270,6 +274,7 @@ Let us start with a one-page primer on factor graphs, which in no way replaces
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Kschischang01it"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -277,6 +282,7 @@ key "Kschischang01it"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Loeliger04spm"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -732,7 +738,7 @@ noindent
|
|||
\begin_inset Formula $f_{0}(x_{1})$
|
||||
\end_inset
|
||||
|
||||
on lines 5-8 as an instance of
|
||||
on lines 5-7 as an instance of
|
||||
\series bold
|
||||
\emph on
|
||||
PriorFactor<T>
|
||||
|
@ -764,7 +770,7 @@ Pose2,
|
|||
noiseModel::Diagonal
|
||||
\series default
|
||||
\emph default
|
||||
by specifying three standard deviations in line 7, respectively 30 cm.
|
||||
by specifying three standard deviations in line 6, respectively 30 cm.
|
||||
\begin_inset space ~
|
||||
\end_inset
|
||||
|
||||
|
@ -786,7 +792,7 @@ Similarly, odometry measurements are specified as
|
|||
Pose2
|
||||
\series default
|
||||
\emph default
|
||||
on line 11, with a slightly different noise model defined on line 12-13.
|
||||
on line 10, with a slightly different noise model defined on line 11.
|
||||
We then add the two factors
|
||||
\begin_inset Formula $f_{1}(x_{1},x_{2};o_{1})$
|
||||
\end_inset
|
||||
|
@ -795,7 +801,7 @@ Pose2
|
|||
\begin_inset Formula $f_{2}(x_{2},x_{3};o_{2})$
|
||||
\end_inset
|
||||
|
||||
on lines 14-15, as instances of yet another templated class,
|
||||
on lines 12-13, as instances of yet another templated class,
|
||||
\series bold
|
||||
\emph on
|
||||
BetweenFactor<T>
|
||||
|
@ -866,7 +872,7 @@ smoothing and mapping
|
|||
|
||||
.
|
||||
Later in this document we will talk about how we can also use GTSAM to
|
||||
do filtering (which you often do
|
||||
do filtering (which often you do
|
||||
\emph on
|
||||
not
|
||||
\emph default
|
||||
|
@ -919,7 +925,11 @@ Values
|
|||
\begin_layout Standard
|
||||
The latter point is often a point of confusion with beginning users of GTSAM.
|
||||
It helps to remember that when designing GTSAM we took a functional approach
|
||||
of classes corresponding to mathematical objects, which are usually immutable.
|
||||
of classes corresponding to mathematical objects, which are usually
|
||||
\emph on
|
||||
immutable
|
||||
\emph default
|
||||
.
|
||||
You should think of a factor graph as a
|
||||
\emph on
|
||||
function
|
||||
|
@ -1321,6 +1331,7 @@ r in a pre-existing map, or indeed the presence of absence of ceiling lights
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Dellaert99b"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -1353,14 +1364,18 @@ where
|
|||
\end_inset
|
||||
|
||||
is the measurement,
|
||||
\begin_inset Formula $q$
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the unknown variable,
|
||||
\begin_inset Formula $h(q)$
|
||||
\end_inset
|
||||
|
||||
is a (possibly nonlinear) measurement function, and
|
||||
is a
|
||||
\series bold
|
||||
measurement function
|
||||
\series default
|
||||
, and
|
||||
\begin_inset Formula $\Sigma$
|
||||
\end_inset
|
||||
|
||||
|
@ -1536,12 +1551,13 @@ E(q)\define h(q)-m
|
|||
|
||||
\end_inset
|
||||
|
||||
which is done on line 12.
|
||||
which is done on line 14.
|
||||
Importantly, because we want to use this factor for nonlinear optimization
|
||||
(see e.g.,
|
||||
\begin_inset CommandInset citation
|
||||
LatexCommand citealt
|
||||
key "Dellaert06ijrr"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -1588,11 +1604,11 @@ q_{y}
|
|||
\begin_inset Formula $q=\left(q_{x},q_{y},q_{\theta}\right)$
|
||||
\end_inset
|
||||
|
||||
, yields the following simple
|
||||
, yields the following
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
matrix in tangent space which is the same the as the rotation matrix:
|
||||
matrix:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
@ -1607,6 +1623,171 @@ H=\left[\begin{array}{ccc}
|
|||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Paragraph*
|
||||
Important Note
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Many of our users, when attempting to create a custom factor, are initially
|
||||
surprised at the Jacobian matrix not agreeing with their intuition.
|
||||
For example, above you might simply expect a
|
||||
\begin_inset Formula $2\times3$
|
||||
\end_inset
|
||||
|
||||
identity matrix.
|
||||
This
|
||||
\emph on
|
||||
would
|
||||
\emph default
|
||||
be true for variables belonging to a vector space.
|
||||
However, in GTSAM we define the Jacobian more generally to be the matrix
|
||||
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
such that
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(q\exp\hat{\xi})\approx h(q)+H\xi
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
where
|
||||
\begin_inset Formula $\xi=(\delta x,\delta y,\delta\theta)$
|
||||
\end_inset
|
||||
|
||||
is an incremental update and
|
||||
\begin_inset Formula $\exp\hat{\xi}$
|
||||
\end_inset
|
||||
|
||||
is the
|
||||
\series bold
|
||||
exponential map
|
||||
\series default
|
||||
for the variable we want to update.
|
||||
In this case
|
||||
\begin_inset Formula $q\in SE(2)$
|
||||
\end_inset
|
||||
|
||||
, where
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
is the group of 2D rigid transforms, implemented by
|
||||
\series bold
|
||||
\emph on
|
||||
Pose2
|
||||
\emph default
|
||||
.
|
||||
|
||||
\series default
|
||||
The exponential map for
|
||||
\begin_inset Formula $SE(2)$
|
||||
\end_inset
|
||||
|
||||
can be approximated to first order as
|
||||
\begin_inset Formula
|
||||
\[
|
||||
\exp\hat{\xi}\approx\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
when using the
|
||||
\begin_inset Formula $3\times3$
|
||||
\end_inset
|
||||
|
||||
matrix representation for 2D poses, and hence
|
||||
\begin_inset Formula
|
||||
\[
|
||||
h(qe^{\hat{\xi}})\approx h\left(\left[\begin{array}{ccc}
|
||||
\cos(q_{\theta}) & -\sin(q_{\theta}) & q_{x}\\
|
||||
\sin(q_{\theta}) & \cos(q_{\theta}) & q_{y}\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\left[\begin{array}{ccc}
|
||||
1 & -\delta\theta & \delta x\\
|
||||
\delta\theta & 1 & \delta y\\
|
||||
0 & 0 & 1
|
||||
\end{array}\right]\right)=\left[\begin{array}{c}
|
||||
q_{x}+\cos(q_{\theta})\delta x-\sin(q_{\theta})\delta y\\
|
||||
q_{y}+\sin(q_{\theta})\delta x+\cos(q_{\theta})\delta y
|
||||
\end{array}\right]
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
which then explains the Jacobian
|
||||
\begin_inset Formula $H$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Lie groups are very relevant in the robotics context, and you can read more
|
||||
here:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/LieGroups.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://github.com/borglab/gtsam/blob/develop/doc/math.pdf
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
In some cases you want to go even beyond Lie groups to a looser concept,
|
||||
|
||||
\series bold
|
||||
manifolds
|
||||
\series default
|
||||
, because not all unknown variables behave like a group, e.g., the space of
|
||||
3D planes, 2D lines, directions in space, etc.
|
||||
For manifolds we do not always have an exponential map, but we have a retractio
|
||||
n that plays the same role.
|
||||
Some of this is explained here:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Itemize
|
||||
\begin_inset Flex URL
|
||||
status open
|
||||
|
||||
\begin_layout Plain Layout
|
||||
|
||||
https://gtsam.org/notes/GTSAM-Concepts.html
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsection
|
||||
|
@ -1669,13 +1850,13 @@ UnaryFactor
|
|||
\series default
|
||||
\emph default
|
||||
instances, and add them to graph.
|
||||
GTSAM uses shared pointers to refer to factors in factor graphs, and
|
||||
GTSAM uses shared pointers to refer to factors, and
|
||||
\series bold
|
||||
\emph on
|
||||
boost::make_shared
|
||||
emplace_shared
|
||||
\series default
|
||||
\emph default
|
||||
is a convenience function to simultaneously construct a class and create
|
||||
is a convenience method to simultaneously construct a class and create
|
||||
a
|
||||
\series bold
|
||||
\emph on
|
||||
|
@ -1683,22 +1864,6 @@ shared_ptr
|
|||
\series default
|
||||
\emph default
|
||||
to it.
|
||||
|
||||
\begin_inset Note Note
|
||||
status collapsed
|
||||
|
||||
\begin_layout Plain Layout
|
||||
and on lines 4-6 we add three newly created
|
||||
\series bold
|
||||
\emph on
|
||||
UnaryFactor
|
||||
\series default
|
||||
\emph default
|
||||
instances to the graph.
|
||||
\end_layout
|
||||
|
||||
\end_inset
|
||||
|
||||
We obtain the factor graph from Figure
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand vref
|
||||
|
@ -1936,8 +2101,8 @@ reference "fig:CompareMarginals"
|
|||
|
||||
\end_inset
|
||||
|
||||
, where I show the marginals on position as covariance ellipses that contain
|
||||
68.26% of all probability mass.
|
||||
, where I show the marginals on position as 5-sigma covariance ellipses
|
||||
that contain 99.9996% of all probability mass.
|
||||
For the odometry marginals, it is immediately apparent from the figure
|
||||
that (1) the uncertainty on pose keeps growing, and (2) the uncertainty
|
||||
on angular odometry translates into increasing uncertainty on y.
|
||||
|
@ -1992,6 +2157,7 @@ PoseSLAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "DurrantWhyte06ram"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -2190,9 +2356,9 @@ reference "fig:example"
|
|||
\end_inset
|
||||
|
||||
, along with covariance ellipses shown in green.
|
||||
These covariance ellipses in 2D indicate the marginal over position, over
|
||||
all possible orientations, and show the area which contain 68.26% of the
|
||||
probability mass (in 1D this would correspond to one standard deviation).
|
||||
These 5-sigma covariance ellipses in 2D indicate the marginal over position,
|
||||
over all possible orientations, and show the area which contain 99.9996%
|
||||
of the probability mass.
|
||||
The graph shows in a clear manner that the uncertainty on pose
|
||||
\begin_inset Formula $x_{5}$
|
||||
\end_inset
|
||||
|
@ -3076,6 +3242,7 @@ reference "fig:Victoria-1"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Kaess09ras"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3088,6 +3255,7 @@ key "Kaess09ras"
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Kaess08tro"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3355,6 +3523,7 @@ iSAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Kaess08tro,Kaess12ijrr"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3606,6 +3775,7 @@ subgraph preconditioning
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Dellaert10iros,Jian11iccv"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3638,6 +3808,7 @@ Visual Odometry
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Nister04cvpr2"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3661,6 +3832,7 @@ Visual SLAM
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citet
|
||||
key "Davison03iccv"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
@ -3711,6 +3883,7 @@ Filtering
|
|||
\begin_inset CommandInset citation
|
||||
LatexCommand citep
|
||||
key "Smith87b"
|
||||
literal "true"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
|
doc/gtsam.pdf: Binary file not shown.

doc/math.lyx

@ -2668,7 +2668,7 @@ reference "eq:pushforward"
|
|||
\begin{eqnarray*}
|
||||
\varphi(a)e^{\yhat} & = & \varphi(ae^{\xhat})\\
|
||||
a^{-1}e^{\yhat} & = & \left(ae^{\xhat}\right)^{-1}\\
|
||||
e^{\yhat} & = & -ae^{\xhat}a^{-1}\\
|
||||
e^{\yhat} & = & ae^{-\xhat}a^{-1}\\
|
||||
\yhat & = & -\Ad a\xhat
|
||||
\end{eqnarray*}
|
||||
|
||||
|
@ -3003,8 +3003,8 @@ between
|
|||
\begin_inset Formula
|
||||
\begin{align}
|
||||
\varphi(g,h)e^{\yhat} & =\varphi(ge^{\xhat},h)\nonumber \\
|
||||
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=-e^{\xhat}g^{-1}h\nonumber \\
|
||||
e^{\yhat} & =-\left(h^{-1}g\right)e^{\xhat}\left(h^{-1}g\right)^{-1}=-\exp\Ad{\left(h^{-1}g\right)}\xhat\nonumber \\
|
||||
g^{-1}he^{\yhat} & =\left(ge^{\xhat}\right)^{-1}h=e^{-\xhat}g^{-1}h\nonumber \\
|
||||
e^{\yhat} & =\left(h^{-1}g\right)e^{-\xhat}\left(h^{-1}g\right)^{-1}=\exp\Ad{\left(h^{-1}g\right)}(-\xhat)\nonumber \\
|
||||
\yhat & =-\Ad{\left(h^{-1}g\right)}\xhat=-\Ad{\varphi\left(h,g\right)}\xhat\label{eq:Dbetween1}
|
||||
\end{align}
|
||||
|
||||
|
@ -5082,6 +5082,394 @@ reference "ex:projection"
|
|||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsection
|
||||
Derivative of Adjoint
|
||||
\begin_inset CommandInset label
|
||||
LatexCommand label
|
||||
name "subsec:pose3_adjoint_deriv"
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
Consider
|
||||
\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$
|
||||
\end_inset
|
||||
|
||||
is defined as
|
||||
\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
The derivative is notated (see Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "sec:Derivatives-of-Actions"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
):
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
First, computing
|
||||
\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\xi_{b})$
|
||||
\end_inset
|
||||
|
||||
is easy, as its matrix is simply
|
||||
\begin_inset Formula $Ad_{T}$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
We will derive
|
||||
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
|
||||
\end_inset
|
||||
|
||||
using two approaches.
|
||||
In the first, we'll define
|
||||
\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
From Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "sec:Derivatives-of-Actions"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
,
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
|
||||
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
Now we can use the definition of the Adjoint representation
|
||||
\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$
|
||||
\end_inset
|
||||
|
||||
(aka conjugation by
|
||||
\begin_inset Formula $g$
|
||||
\end_inset
|
||||
|
||||
) then apply product rule and simplify:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
|
||||
& =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
|
||||
& =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
|
||||
& =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
|
||||
& =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
|
||||
& =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
|
||||
D_{1}F_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
Where
|
||||
\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$
|
||||
\end_inset
|
||||
|
||||
is the adjoint map of the lie algebra.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The second, perhaps more intuitive way of deriving
|
||||
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi_{b})$
|
||||
\end_inset
|
||||
|
||||
, would be to use the fact that the derivative at the origin
|
||||
\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$
|
||||
\end_inset
|
||||
|
||||
by definition of the adjoint
|
||||
\begin_inset Formula $ad_{\xi}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
Then applying the property
|
||||
\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$
|
||||
\end_inset
|
||||
|
||||
,
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\[
|
||||
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
|
||||
\]
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsection
|
||||
Derivative of AdjointTranspose
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
The transpose of the Adjoint,
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g^{*}\rightarrow g^{*}}$
|
||||
\end_inset
|
||||
|
||||
, is useful as a way to change the reference frame of vectors in the dual
|
||||
space
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
(note the
|
||||
\begin_inset Formula $^{*}$
|
||||
\end_inset
|
||||
|
||||
denoting that we are now in the dual space)
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
.
|
||||
To be more concrete, where
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
as
|
||||
\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$
|
||||
\end_inset
|
||||
|
||||
converts the
|
||||
\emph on
|
||||
twist
|
||||
\emph default
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $\xi_{b}$
|
||||
\end_inset
|
||||
|
||||
from the
|
||||
\begin_inset Formula $T$
|
||||
\end_inset
|
||||
|
||||
frame,
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
converts the
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph on
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
wrench
|
||||
\emph default
|
||||
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $\xi_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
from the
|
||||
\begin_inset Formula $T$
|
||||
\end_inset
|
||||
|
||||
frame
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
.
|
||||
It's difficult to apply a similar derivation as in Section
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand ref
|
||||
reference "subsec:pose3_adjoint_deriv"
|
||||
plural "false"
|
||||
caps "false"
|
||||
noprefix "false"
|
||||
|
||||
\end_inset
|
||||
|
||||
for the derivative of
|
||||
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
|
||||
\end_inset
|
||||
|
||||
because
|
||||
\begin_inset Formula $Ad_{T}^{T}$
|
||||
\end_inset
|
||||
|
||||
cannot be naturally defined as a conjugation, so we resort to crunching
|
||||
through the algebra.
|
||||
The details are omitted but the result is a form that vaguely resembles
|
||||
(but does not exactly match)
|
||||
\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$
|
||||
\end_inset
|
||||
|
||||
:
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\begin_inset Formula
|
||||
\begin{align*}
|
||||
\begin{bmatrix}\omega_{T}\\
|
||||
v_{T}
|
||||
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
|
||||
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
|
||||
\hat{v}_{T} & 0
|
||||
\end{bmatrix}
|
||||
\end{align*}
|
||||
|
||||
\end_inset
|
||||
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Subsection
|
||||
|
@ -6286,7 +6674,7 @@ One representation of a line is through 2 vectors
|
|||
\begin_inset Formula $d$
|
||||
\end_inset
|
||||
|
||||
points from the orgin to the closest point on the line.
|
||||
points from the origin to the closest point on the line.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
|
|
doc/math.pdf: Binary file not shown.

@@ -7,7 +7,7 @@
<count>32</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>
@@ -7,7 +7,7 @@
<count>2</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>

@@ -53,11 +53,10 @@ int main(int argc, char **argv) {
// Create solver and eliminate
Ordering ordering;
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);
auto mpe = fg.optimize();
GTSAM_PRINT(mpe);
// We can also build a Bayes tree (directed junction tree).
// The elimination order above will do fine:
@@ -69,15 +68,15 @@ int main(int argc, char **argv) {
fg.add(Dyspnea, "0 1");
// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
GTSAM_PRINT(*mpe2);
auto mpe2 = fg.optimize();
GTSAM_PRINT(mpe2);
// We can also sample from it
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal2->sample();
GTSAM_PRINT(*sample);
auto sample = chordal->sample();
GTSAM_PRINT(sample);
}
return 0;
}
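
As an aside, the calling pattern introduced above (optimize the factor graph directly and hold the result with `auto`, no shared-pointer dereference) can be illustrated with a minimal sketch; the keys and table values below are made up and are not from the repository.

```cpp
// Sketch of the post-change Discrete API usage shown in this diff.
#include <gtsam/base/Testable.h>                // GTSAM_PRINT
#include <gtsam/discrete/DiscreteFactorGraph.h>

using namespace gtsam;

int main() {
  DiscreteKey Asia(0, 2), Smoking(1, 2);  // two hypothetical binary variables

  DiscreteFactorGraph fg;
  fg.add(Asia, "99 1");               // unary factor on Asia (made-up weights)
  fg.add(Asia & Smoking, "4 6 3 7");  // pairwise factor (made-up weights)

  auto mpe = fg.optimize();  // max-product over the graph, returns the assignment directly
  GTSAM_PRINT(mpe);
  return 0;
}
```
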
@ -33,11 +33,11 @@ using namespace gtsam;
|
|||
int main(int argc, char **argv) {
|
||||
// Define keys and a print function
|
||||
Key C(1), S(2), R(3), W(4);
|
||||
auto print = [=](DiscreteFactor::sharedValues values) {
|
||||
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
|
||||
<< " Sprinkler = " << static_cast<bool>((*values)[S])
|
||||
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
|
||||
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
|
||||
auto print = [=](const DiscreteFactor::Values& values) {
|
||||
cout << boolalpha << "Cloudy = " << static_cast<bool>(values.at(C))
|
||||
<< " Sprinkler = " << static_cast<bool>(values.at(S))
|
||||
<< " Rain = " << boolalpha << static_cast<bool>(values.at(R))
|
||||
<< " WetGrass = " << static_cast<bool>(values.at(W)) << endl;
|
||||
};
|
||||
|
||||
// We assume binary state variables
|
||||
|
@@ -85,7 +85,7 @@ int main(int argc, char **argv) {
}

// "Most Probable Explanation", i.e., configuration with largest value
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
auto mpe = graph.optimize();
cout << "\nMost Probable Explanation (MPE):" << endl;
print(mpe);
@@ -96,8 +96,7 @@ int main(int argc, char **argv) {
graph.add(Cloudy, "1 0");

// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
auto mpe_with_evidence = graph.optimize();

cout << "\nMPE given C=0:" << endl;
print(mpe_with_evidence);
@@ -110,10 +109,11 @@ int main(int argc, char **argv) {
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
<< endl;

// We can also sample from it
// We can also sample from the eliminated graph
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal->sample();
auto sample = chordal->sample();
print(sample);
}
return 0;
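For marginal queries the example keeps using DiscreteMarginals; a hedged sketch of that query path (keys and tables again illustrative):

// Hedged sketch of a marginal query; keys and tables are illustrative.
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>

#include <iostream>

using namespace gtsam;

int main() {
  DiscreteKey Cloudy(1, 2), WetGrass(4, 2);
  DiscreteFactorGraph graph;
  graph.add(Cloudy, "0.5 0.5");
  graph.add(Cloudy & WetGrass, "0.9 0.1 0.2 0.8");
  DiscreteMarginals marginals(graph);
  Vector probs = marginals.marginalProbabilities(WetGrass);  // one entry per state
  std::cout << "P(W=1) = " << probs[1] << std::endl;
  return 0;
}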
@@ -122,8 +122,7 @@ int main(int argc, char *argv[]) {
std::cout << "initial error=" << graph.error(initialEstimate) << std::endl;
std::cout << "final error=" << graph.error(result) << std::endl;

std::ofstream os("examples/vio_batch.dot");
graph.saveGraph(os, result);
graph.saveGraph("examples/vio_batch.dot", result);

return 0;
}
@@ -59,21 +59,21 @@ int main(int argc, char **argv) {
// Convert to factor graph
DiscreteFactorGraph factorGraph(hmm);

// Do max-prodcut
auto mpe = factorGraph.optimize();
GTSAM_PRINT(mpe);

// Create solver and eliminate
// This will create a DAG ordered with arrow of time reversed
DiscreteBayesNet::shared_ptr chordal =
factorGraph.eliminateSequential(ordering);
chordal->print("Eliminated");

// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);

// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t k = 0; k < 10; k++) {
DiscreteFactor::sharedValues sample = chordal->sample();
GTSAM_PRINT(*sample);
auto sample = chordal->sample();
GTSAM_PRINT(sample);
}

// Or compute the marginals. This re-eliminates the FG into a Bayes tree
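Sampling still goes through an eliminated DiscreteBayesNet, as the HMM hunk above shows; a hedged sketch of the pattern, with made-up tables:

// Hedged sketch: eliminate once, then draw samples, as in the hunk above.
#include <gtsam/base/Testable.h>  // GTSAM_PRINT
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>

using namespace gtsam;

int main() {
  DiscreteKey X0(0, 2), X1(1, 2);
  DiscreteFactorGraph factorGraph;
  factorGraph.add(X0, "0.6 0.4");
  factorGraph.add(X0 & X1, "0.7 0.3 0.2 0.8");  // made-up transition table
  DiscreteBayesNet::shared_ptr chordal = factorGraph.eliminateSequential();
  for (size_t k = 0; k < 10; k++) {
    auto sample = chordal->sample();  // returned by value in the new API
    GTSAM_PRINT(sample);
  }
  return 0;
}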
@ -11,21 +11,23 @@
|
|||
|
||||
/**
|
||||
* @file IMUKittiExampleGPS
|
||||
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
|
||||
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI
|
||||
* VISION BENCHMARK SUITE
|
||||
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ
|
||||
* Electronics
|
||||
*/
|
||||
|
||||
// GTSAM related includes.
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||
#include <gtsam/navigation/GPSFactor.h>
|
||||
#include <gtsam/navigation/ImuFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
#include <gtsam/nonlinear/ISAM2Params.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <fstream>
|
||||
|
@ -34,9 +36,9 @@
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
|
||||
struct KittiCalibration {
|
||||
double body_ptx;
|
||||
|
@ -73,9 +75,11 @@ void loadKittiData(KittiCalibration& kitti_calibration,
|
|||
string line;
|
||||
|
||||
// Read IMU metadata and compute relative sensor pose transforms
|
||||
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
|
||||
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
|
||||
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
|
||||
// GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
|
||||
// AverageDeltaT
|
||||
string imu_metadata_file =
|
||||
findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||
|
||||
printf("-- Reading sensor metadata\n");
|
||||
|
@ -85,12 +89,9 @@ void loadKittiData(KittiCalibration& kitti_calibration,
|
|||
// Load Kitti calibration
|
||||
getline(imu_metadata, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&kitti_calibration.body_ptx,
|
||||
&kitti_calibration.body_pty,
|
||||
&kitti_calibration.body_ptz,
|
||||
&kitti_calibration.body_prx,
|
||||
&kitti_calibration.body_pry,
|
||||
&kitti_calibration.body_prz,
|
||||
&kitti_calibration.body_ptx, &kitti_calibration.body_pty,
|
||||
&kitti_calibration.body_ptz, &kitti_calibration.body_prx,
|
||||
&kitti_calibration.body_pry, &kitti_calibration.body_prz,
|
||||
&kitti_calibration.accelerometer_sigma,
|
||||
&kitti_calibration.gyroscope_sigma,
|
||||
&kitti_calibration.integration_sigma,
|
||||
|
@ -98,15 +99,11 @@ void loadKittiData(KittiCalibration& kitti_calibration,
|
|||
&kitti_calibration.gyroscope_bias_sigma,
|
||||
&kitti_calibration.average_delta_t);
|
||||
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||
kitti_calibration.body_ptx,
|
||||
kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry,
|
||||
kitti_calibration.body_prz,
|
||||
kitti_calibration.body_ptx, kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz, kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry, kitti_calibration.body_prz,
|
||||
kitti_calibration.accelerometer_sigma,
|
||||
kitti_calibration.gyroscope_sigma,
|
||||
kitti_calibration.integration_sigma,
|
||||
kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma,
|
||||
kitti_calibration.accelerometer_bias_sigma,
|
||||
kitti_calibration.gyroscope_bias_sigma,
|
||||
kitti_calibration.average_delta_t);
|
||||
|
@ -119,13 +116,12 @@ void loadKittiData(KittiCalibration& kitti_calibration,
|
|||
ifstream imu_data(imu_data_file.c_str());
|
||||
getline(imu_data, line, '\n'); // ignore the first line
|
||||
|
||||
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
|
||||
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0,
|
||||
gyro_y = 0, gyro_z = 0;
|
||||
while (!imu_data.eof()) {
|
||||
getline(imu_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&time, &dt,
|
||||
&acc_x, &acc_y, &acc_z,
|
||||
&gyro_x, &gyro_y, &gyro_z);
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt,
|
||||
&acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z);
|
||||
|
||||
ImuMeasurement measurement;
|
||||
measurement.time = time;
|
||||
|
@ -163,12 +159,16 @@ int main(int argc, char* argv[]) {
|
|||
vector<GpsMeasurement> gps_measurements;
|
||||
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||
|
||||
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||
Vector6 BodyP =
|
||||
(Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz, kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||
.finished();
|
||||
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
|
||||
printf(
|
||||
"Currently only support IMUinBody is identity, i.e. IMU and body frame "
|
||||
"are the same");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
|
@ -180,38 +180,45 @@ int main(int argc, char* argv[]) {
|
|||
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||
|
||||
// Configure noise models
|
||||
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0/0.07))
|
||||
auto noise_model_gps = noiseModel::Diagonal::Precisions(
|
||||
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07))
|
||||
.finished());
|
||||
|
||||
// Set initial conditions for the estimated trajectory
|
||||
// initial pose is the reference frame (navigation frame)
|
||||
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||
auto current_pose_global =
|
||||
Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||
// the vehicle is stationary at the beginning at position 0,0,0
|
||||
Vector3 current_velocity_global = Vector3::Zero();
|
||||
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||
|
||||
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0))
|
||||
.finished());
|
||||
auto sigma_init_x = noiseModel::Diagonal::Precisions(
|
||||
(Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished());
|
||||
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
|
||||
Vector3::Constant(5.00e-05))
|
||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas(
|
||||
(Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05))
|
||||
.finished());
|
||||
|
||||
// Set IMU preintegration parameters
|
||||
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||
Matrix33 measured_acc_cov =
|
||||
I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||
Matrix33 measured_omega_cov =
|
||||
I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||
// error committed in integrating position from velocities
|
||||
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||
Matrix33 integration_error_cov =
|
||||
I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||
|
||||
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||
imu_params->accelerometerCovariance =
|
||||
measured_acc_cov; // acc white noise in continuous
|
||||
imu_params->integrationCovariance =
|
||||
integration_error_cov; // integration uncertainty continuous
|
||||
imu_params->gyroscopeCovariance =
|
||||
measured_omega_cov; // gyro white noise in continuous
|
||||
imu_params->omegaCoriolis = w_coriolis;
|
||||
|
||||
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
|
||||
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement =
|
||||
nullptr;
|
||||
|
||||
// Set ISAM2 parameters and create ISAM2 solver object
|
||||
ISAM2Params isam_params;
|
||||
|
@ -220,16 +227,22 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
ISAM2 isam(isam_params);
|
||||
|
||||
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
|
||||
// Create the factor graph and values object that will store new factors and
|
||||
// values to add to the incremental graph
|
||||
NonlinearFactorGraph new_factors;
|
||||
Values new_values; // values storing the initial estimates of new nodes in the factor graph
|
||||
Values new_values; // values storing the initial estimates of new nodes in
|
||||
// the factor graph
|
||||
|
||||
/// Main loop:
|
||||
/// (1) we read the measurements
|
||||
/// (2) we create the corresponding factors in the graph
|
||||
/// (3) we solve the graph to obtain and optimal estimate of robot trajectory
|
||||
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
|
||||
printf(
|
||||
"-- Starting main loop: inference is performed at each time step, but we "
|
||||
"plot trajectory every 10 steps\n");
|
||||
size_t j = 0;
|
||||
size_t included_imu_measurement_count = 0;
|
||||
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
// At each non=IMU measurement we initialize a new node in the graph
|
||||
auto current_pose_key = X(i);
|
||||
|
@ -242,19 +255,24 @@ int main(int argc, char* argv[]) {
|
|||
new_values.insert(current_pose_key, current_pose_global);
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
|
||||
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
|
||||
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(
|
||||
current_pose_key, current_pose_global, sigma_init_x);
|
||||
new_factors.emplace_shared<PriorFactor<Vector3>>(
|
||||
current_vel_key, current_velocity_global, sigma_init_v);
|
||||
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(
|
||||
current_bias_key, current_bias, sigma_init_b);
|
||||
} else {
|
||||
double t_previous = gps_measurements[i-1].time;
|
||||
double t_previous = gps_measurements[i - 1].time;
|
||||
|
||||
// Summarize IMU data between the previous GPS measurement and now
|
||||
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
|
||||
static size_t included_imu_measurement_count = 0;
|
||||
current_summarized_measurement =
|
||||
std::make_shared<PreintegratedImuMeasurements>(imu_params,
|
||||
current_bias);
|
||||
|
||||
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||
if (imu_measurements[j].time >= t_previous) {
|
||||
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
|
||||
imu_measurements[j].gyroscope,
|
||||
current_summarized_measurement->integrateMeasurement(
|
||||
imu_measurements[j].accelerometer, imu_measurements[j].gyroscope,
|
||||
imu_measurements[j].dt);
|
||||
included_imu_measurement_count++;
|
||||
}
|
||||
|
@ -262,38 +280,44 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
// Create IMU factor
|
||||
auto previous_pose_key = X(i-1);
|
||||
auto previous_vel_key = V(i-1);
|
||||
auto previous_bias_key = B(i-1);
|
||||
auto previous_pose_key = X(i - 1);
|
||||
auto previous_vel_key = V(i - 1);
|
||||
auto previous_bias_key = B(i - 1);
|
||||
|
||||
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
|
||||
current_pose_key, current_vel_key,
|
||||
previous_bias_key, *current_summarized_measurement);
|
||||
new_factors.emplace_shared<ImuFactor>(
|
||||
previous_pose_key, previous_vel_key, current_pose_key,
|
||||
current_vel_key, previous_bias_key, *current_summarized_measurement);
|
||||
|
||||
// Bias evolution as given in the IMU metadata
|
||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas(
|
||||
(Vector6() << Vector3::Constant(
|
||||
sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.accelerometer_bias_sigma),
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) *
|
||||
kitti_calibration.gyroscope_bias_sigma))
|
||||
.finished());
|
||||
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
|
||||
current_bias_key,
|
||||
imuBias::ConstantBias(),
|
||||
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(
|
||||
previous_bias_key, current_bias_key, imuBias::ConstantBias(),
|
||||
sigma_between_b);
|
||||
|
||||
// Create GPS factor
|
||||
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||
auto gps_pose =
|
||||
Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||
if ((i % gps_skip) == 0) {
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(
|
||||
current_pose_key, gps_pose, noise_model_gps);
|
||||
new_values.insert(current_pose_key, gps_pose);
|
||||
|
||||
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
|
||||
printf("############ POSE INCLUDED AT TIME %.6lf ############\n",
|
||||
t);
|
||||
cout << gps_pose.translation();
|
||||
printf("\n\n");
|
||||
} else {
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
}
|
||||
|
||||
// Add initial values for velocity and bias based on the previous estimates
|
||||
// Add initial values for velocity and bias based on the previous
|
||||
// estimates
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
|
||||
|
@ -301,8 +325,9 @@ int main(int argc, char* argv[]) {
|
|||
// =======================================================================
|
||||
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||
// first so that the heading becomes observable.
|
||||
if (i > (first_gps_pose + 2*gps_skip)) {
|
||||
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
|
||||
if (i > (first_gps_pose + 2 * gps_skip)) {
|
||||
printf("############ NEW FACTORS AT TIME %.6lf ############\n",
|
||||
t);
|
||||
new_factors.print();
|
||||
|
||||
isam.update(new_factors, new_values);
|
||||
|
@ -318,7 +343,7 @@ int main(int argc, char* argv[]) {
|
|||
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||
|
||||
printf("\n################ POSE AT TIME %lf ################\n", t);
|
||||
printf("\n############ POSE AT TIME %lf ############\n", t);
|
||||
current_pose_global.print();
|
||||
printf("\n\n");
|
||||
}
|
||||
|
@ -328,7 +353,8 @@ int main(int argc, char* argv[]) {
|
|||
// Save results to file
|
||||
printf("\nWriting results to file...\n");
|
||||
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||
fprintf(fp_out,
|
||||
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||
|
||||
Values result = isam.calculateEstimate();
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
|
@ -349,10 +375,9 @@ int main(int argc, char* argv[]) {
|
|||
cout << "Bias:" << endl << bias << endl;
|
||||
|
||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||
gps_measurements[i].time,
|
||||
pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||
gps(0), gps(1), gps(2));
|
||||
gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0),
|
||||
gps(1), gps(2));
|
||||
}
|
||||
|
||||
fclose(fp_out);
|
||||
|
|
|
@@ -60,11 +60,10 @@ int main(int argc, char** argv) {

// save factor graph as graphviz dot file
// Render to PDF using "fdp Pose2SLAMExample.dot -Tpdf > graph.pdf"
ofstream os("Pose2SLAMExample.dot");
graph.saveGraph(os, result);
graph.saveGraph("Pose2SLAMExample.dot", result);

// Also print out to console
graph.saveGraph(cout, result);
graph.dot(cout, result);

return 0;
}
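The hunk above switches to the filename overload of saveGraph and to dot() for streaming; a hedged sketch on a toy two-pose graph:

// Hedged sketch of the dot/saveGraph convenience overloads on a toy two-pose graph.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/BetweenFactor.h>

#include <iostream>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;
  auto noise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
  graph.addPrior(1, Pose2(0, 0, 0), noise);
  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(2, 0, 0), noise);

  Values values;
  values.insert(1, Pose2(0, 0, 0));
  values.insert(2, Pose2(2, 0, 0));

  graph.saveGraph("toy.dot", values);  // writes the .dot file directly
  graph.dot(std::cout, values);        // or stream the Graphviz source
  return 0;
}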
@ -10,62 +10,81 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file RangeISAMExample_plaza1.cpp
|
||||
* @file RangeISAMExample_plaza2.cpp
|
||||
* @brief A 2D Range SLAM example
|
||||
* @date June 20, 2013
|
||||
* @author FRank Dellaert
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2 objects
|
||||
// Both relative poses and recovered trajectory poses will be stored as Pose2.
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
using gtsam::Pose2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a unique key.
|
||||
// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
|
||||
// Here we will use Symbols
|
||||
// gtsam::Vectors are dynamic Eigen vectors, Vector3 is statically sized.
|
||||
#include <gtsam/base/Vector.h>
|
||||
using gtsam::Vector;
|
||||
using gtsam::Vector3;
|
||||
|
||||
// Unknown landmarks are of type Point2 (which is just a 2D Eigen vector).
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
using gtsam::Point2;
|
||||
|
||||
// Each variable in the system (poses and landmarks) must be identified with a
|
||||
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
|
||||
// (X1, X2, L1). Here we will use Symbols.
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
using gtsam::Symbol;
|
||||
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally
|
||||
// We want to use iSAM2 to solve the range-SLAM problem incrementally.
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
|
||||
// iSAM2 requires as input a set set of new factors to be added stored in a factor graph,
|
||||
// and initial guesses for any new variables used in the added factors
|
||||
// iSAM2 requires as input a set set of new factors to be added stored in a
|
||||
// factor graph, and initial guesses for any new variables in the added factors.
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
||||
// We will use a non-liear solver to batch-inituialize from the first 150 frames
|
||||
// We will use a non-linear solver to batch-initialize from the first 150 frames
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
||||
// In GTSAM, measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems.
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
// Measurement functions are represented as 'factors'. Several common factors
|
||||
// have been provided with the library for solving robotics SLAM problems:
|
||||
#include <gtsam/sam/RangeFactor.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own
|
||||
// Timing, with functions below, provides nice facilities to benchmark.
|
||||
#include <gtsam/base/timing.h>
|
||||
using gtsam::tictoc_print_;
|
||||
|
||||
// Standard headers, added last, so we know headers above work on their own.
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
namespace NM = gtsam::noiseModel;
|
||||
|
||||
// data available at http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/
|
||||
// Datafile format (from http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/log.html)
|
||||
// Data is second UWB ranging dataset, B2 or "plaza 2", from
|
||||
// "Navigating with Ranging Radios: Five Data Sets with Ground Truth"
|
||||
// by Joseph Djugash, Bradley Hamner, and Stephan Roth
|
||||
// https://www.ri.cmu.edu/pub_files/2009/9/Final_5datasetsRangingRadios.pdf
|
||||
|
||||
// load the odometry
|
||||
// DR: Odometry Input (delta distance traveled and delta heading change)
|
||||
// Time (sec) Delta Dist. Trav. (m) Delta Heading (rad)
|
||||
typedef pair<double, Pose2> TimedOdometry;
|
||||
list<TimedOdometry> readOdometry() {
|
||||
list<TimedOdometry> odometryList;
|
||||
string data_file = findExampleDataFile("Plaza2_DR.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
// Time (sec) Delta Distance Traveled (m) Delta Heading (rad)
|
||||
using TimedOdometry = std::pair<double, Pose2>;
|
||||
std::list<TimedOdometry> readOdometry() {
|
||||
std::list<TimedOdometry> odometryList;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_DR.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, distance_traveled, delta_heading;
|
||||
is >> t >> distance_traveled >> delta_heading;
|
||||
odometryList.push_back(
|
||||
TimedOdometry(t, Pose2(distance_traveled, 0, delta_heading)));
|
||||
odometryList.emplace_back(t, Pose2(distance_traveled, 0, delta_heading));
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return odometryList;
|
||||
|
@ -73,72 +92,66 @@ list<TimedOdometry> readOdometry() {
|
|||
|
||||
// load the ranges from TD
|
||||
// Time (sec) Sender / Antenna ID Receiver Node ID Range (m)
|
||||
typedef boost::tuple<double, size_t, double> RangeTriple;
|
||||
vector<RangeTriple> readTriples() {
|
||||
vector<RangeTriple> triples;
|
||||
string data_file = findExampleDataFile("Plaza2_TD.txt");
|
||||
ifstream is(data_file.c_str());
|
||||
using RangeTriple = boost::tuple<double, size_t, double>;
|
||||
std::vector<RangeTriple> readTriples() {
|
||||
std::vector<RangeTriple> triples;
|
||||
std::string data_file = gtsam::findExampleDataFile("Plaza2_TD.txt");
|
||||
std::ifstream is(data_file.c_str());
|
||||
|
||||
while (is) {
|
||||
double t, sender, range;
|
||||
size_t receiver;
|
||||
double t, range, sender, receiver;
|
||||
is >> t >> sender >> receiver >> range;
|
||||
triples.push_back(RangeTriple(t, receiver, range));
|
||||
triples.emplace_back(t, receiver, range);
|
||||
}
|
||||
is.clear(); /* clears the end-of-file and error flags */
|
||||
return triples;
|
||||
}
|
||||
|
||||
// main
|
||||
int main (int argc, char** argv) {
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
// load Plaza2 data
|
||||
list<TimedOdometry> odometry = readOdometry();
|
||||
// size_t M = odometry.size();
|
||||
std::list<TimedOdometry> odometry = readOdometry();
|
||||
size_t M = odometry.size();
|
||||
std::cout << "Read " << M << " odometry entries." << std::endl;
|
||||
|
||||
vector<RangeTriple> triples = readTriples();
|
||||
std::vector<RangeTriple> triples = readTriples();
|
||||
size_t K = triples.size();
|
||||
std::cout << "Read " << K << " range triples." << std::endl;
|
||||
|
||||
// parameters
|
||||
size_t minK = 150; // minimum number of range measurements to process initially
|
||||
size_t minK =
|
||||
150; // minimum number of range measurements to process initially
|
||||
size_t incK = 25; // minimum number of range measurements to process after
|
||||
bool groundTruth = false;
|
||||
bool robust = true;
|
||||
|
||||
// Set Noise parameters
|
||||
Vector priorSigmas = Vector3(1,1,M_PI);
|
||||
Vector priorSigmas = Vector3(1, 1, M_PI);
|
||||
Vector odoSigmas = Vector3(0.05, 0.01, 0.1);
|
||||
double sigmaR = 100; // range standard deviation
|
||||
const NM::Base::shared_ptr // all same type
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), //prior
|
||||
priorNoise = NM::Diagonal::Sigmas(priorSigmas), // prior
|
||||
looseNoise = NM::Isotropic::Sigma(2, 1000), // loose LM prior
|
||||
odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
|
||||
gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15), gaussian), //robust
|
||||
tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15),
|
||||
gaussian), // robust
|
||||
rangeNoise = robust ? tukey : gaussian;
|
||||
|
||||
// Initialize iSAM
|
||||
ISAM2 isam;
|
||||
gtsam::ISAM2 isam;
|
||||
|
||||
// Add prior on first pose
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120,
|
||||
M_PI - 2.02108900000000);
|
||||
NonlinearFactorGraph newFactors;
|
||||
Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120, M_PI - 2.021089);
|
||||
gtsam::NonlinearFactorGraph newFactors;
|
||||
newFactors.addPrior(0, pose0, priorNoise);
|
||||
Values initial;
|
||||
gtsam::Values initial;
|
||||
initial.insert(0, pose0);
|
||||
|
||||
// initialize points
|
||||
if (groundTruth) { // from TL file
|
||||
initial.insert(symbol('L', 1), Point2(-68.9265, 18.3778));
|
||||
initial.insert(symbol('L', 6), Point2(-37.5805, 69.2278));
|
||||
initial.insert(symbol('L', 0), Point2(-33.6205, 26.9678));
|
||||
initial.insert(symbol('L', 5), Point2(1.7095, -5.8122));
|
||||
} else { // drawn from sigma=1 Gaussian in matlab version
|
||||
initial.insert(symbol('L', 1), Point2(3.5784, 2.76944));
|
||||
initial.insert(symbol('L', 6), Point2(-1.34989, 3.03492));
|
||||
initial.insert(symbol('L', 0), Point2(0.725404, -0.0630549));
|
||||
initial.insert(symbol('L', 5), Point2(0.714743, -0.204966));
|
||||
}
|
||||
// We will initialize landmarks randomly, and keep track of which landmarks we
|
||||
// already added with a set.
|
||||
std::mt19937_64 rng;
|
||||
std::normal_distribution<double> normal(0.0, 100.0);
|
||||
std::set<Symbol> initializedLandmarks;
|
||||
|
||||
// set some loop variables
|
||||
size_t i = 1; // step counter
|
||||
|
@ -149,14 +162,15 @@ int main (int argc, char** argv) {
|
|||
|
||||
// Loop over odometry
|
||||
gttic_(iSAM);
|
||||
for(const TimedOdometry& timedOdometry: odometry) {
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
for (const TimedOdometry& timedOdometry : odometry) {
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
double t;
|
||||
Pose2 odometry;
|
||||
boost::tie(t, odometry) = timedOdometry;
|
||||
|
||||
// add odometry factor
|
||||
newFactors.push_back(BetweenFactor<Pose2>(i-1, i, odometry, odoNoise));
|
||||
newFactors.emplace_shared<gtsam::BetweenFactor<Pose2>>(i - 1, i, odometry,
|
||||
odoNoise);
|
||||
|
||||
// predict pose and add as initial estimate
|
||||
Pose2 predictedPose = lastPose.compose(odometry);
|
||||
|
@ -166,8 +180,20 @@ int main (int argc, char** argv) {
|
|||
// Check if there are range factors to be added
|
||||
while (k < K && t >= boost::get<0>(triples[k])) {
|
||||
size_t j = boost::get<1>(triples[k]);
|
||||
Symbol landmark_key('L', j);
|
||||
double range = boost::get<2>(triples[k]);
|
||||
newFactors.push_back(RangeFactor<Pose2, Point2>(i, symbol('L', j), range,rangeNoise));
|
||||
newFactors.emplace_shared<gtsam::RangeFactor<Pose2, Point2>>(
|
||||
i, landmark_key, range, rangeNoise);
|
||||
if (initializedLandmarks.count(landmark_key) == 0) {
|
||||
std::cout << "adding landmark " << j << std::endl;
|
||||
double x = normal(rng), y = normal(rng);
|
||||
initial.insert(landmark_key, Point2(x, y));
|
||||
initializedLandmarks.insert(landmark_key);
|
||||
// We also add a very loose prior on the landmark in case there is only
|
||||
// one sighting, which cannot fully determine the landmark.
|
||||
newFactors.emplace_shared<gtsam::PriorFactor<Point2>>(
|
||||
landmark_key, Point2(0, 0), looseNoise);
|
||||
}
|
||||
k = k + 1;
|
||||
countK = countK + 1;
|
||||
}
|
||||
|
@ -175,8 +201,9 @@ int main (int argc, char** argv) {
|
|||
// Check whether to update iSAM 2
|
||||
if ((k > minK) && (countK > incK)) {
|
||||
if (!initialized) { // Do a full optimize for first minK ranges
|
||||
std::cout << "Initializing at time " << k << std::endl;
|
||||
gttic_(batchInitialization);
|
||||
LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
gtsam::LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
|
||||
initial = batchOptimizer.optimize();
|
||||
gttoc_(batchInitialization);
|
||||
initialized = true;
|
||||
|
@ -185,21 +212,27 @@ int main (int argc, char** argv) {
|
|||
isam.update(newFactors, initial);
|
||||
gttoc_(update);
|
||||
gttic_(calculateEstimate);
|
||||
Values result = isam.calculateEstimate();
|
||||
gtsam::Values result = isam.calculateEstimate();
|
||||
gttoc_(calculateEstimate);
|
||||
lastPose = result.at<Pose2>(i);
|
||||
newFactors = NonlinearFactorGraph();
|
||||
initial = Values();
|
||||
newFactors = gtsam::NonlinearFactorGraph();
|
||||
initial = gtsam::Values();
|
||||
countK = 0;
|
||||
}
|
||||
i += 1;
|
||||
//--------------------------------- odometry loop -----------------------------------------
|
||||
//--------------------------------- odometry loop --------------------------
|
||||
} // end for
|
||||
gttoc_(iSAM);
|
||||
|
||||
// Print timings
|
||||
tictoc_print_();
|
||||
|
||||
// Print optimized landmarks:
|
||||
gtsam::Values finalResult = isam.calculateEstimate();
|
||||
for (auto&& landmark_key : initializedLandmarks) {
|
||||
Point2 p = finalResult.at<Point2>(landmark_key);
|
||||
std::cout << landmark_key << ":" << p.transpose() << "\n";
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
|
|
|
@@ -26,9 +26,12 @@
#include <gtsam/nonlinear/ExpressionFactorGraph.h>

// Header order is close to far
#include <gtsam/inference/Symbol.h>
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/inference/Symbol.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;
@@ -46,10 +49,9 @@ int main(int argc, char* argv[]) {
if (argc > 1) filename = string(argv[1]);

// Load the SfM data from file
SfmData mydata;
readBAL(filename, mydata);
SfmData mydata = SfmData::FromBalFile(filename);
cout << boost::format("read %1% tracks on %2% cameras\n") %
mydata.number_tracks() % mydata.number_cameras();
mydata.numberTracks() % mydata.numberCameras();

// Create a factor graph
ExpressionFactorGraph graph;
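Loading BAL data now goes through a named constructor instead of readBAL; a hedged sketch (the dataset name is the one used elsewhere in the examples and is illustrative here):

// Hedged sketch of loading a BAL file with the named constructor.
#include <gtsam/sfm/SfmData.h>
#include <gtsam/slam/dataset.h>  // findExampleDataFile

#include <iostream>
#include <string>

using namespace gtsam;

int main() {
  const std::string filename = findExampleDataFile("dubrovnik-3-7-pre");
  SfmData mydata = SfmData::FromBalFile(filename);
  std::cout << "read " << mydata.numberTracks() << " tracks on "
            << mydata.numberCameras() << " cameras" << std::endl;
  return 0;
}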
@ -10,17 +10,20 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SFMExample.cpp
|
||||
* @file SFMExample_bal.cpp
|
||||
* @brief Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
// For an explanation of headers, see SFMExample.cpp
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
#include <boost/format.hpp>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
@ -41,9 +44,8 @@ int main (int argc, char* argv[]) {
|
|||
if (argc>1) filename = string(argv[1]);
|
||||
|
||||
// Load the SfM data from file
|
||||
SfmData mydata;
|
||||
readBAL(filename, mydata);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.number_tracks() % mydata.number_cameras();
|
||||
SfmData mydata = SfmData::FromBalFile(filename);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.numberTracks() % mydata.numberCameras();
|
||||
|
||||
// Create a factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
|
|
|
@ -17,15 +17,16 @@
|
|||
*/
|
||||
|
||||
// For an explanation of headers, see SFMExample.cpp
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/sfm/SfmData.h> // for loading BAL datasets !
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/GeneralSFMFactor.h>
|
||||
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
|
||||
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
|
||||
#include <boost/format.hpp>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
@ -45,10 +46,9 @@ int main(int argc, char* argv[]) {
|
|||
if (argc > 1) filename = string(argv[1]);
|
||||
|
||||
// Load the SfM data from file
|
||||
SfmData mydata;
|
||||
readBAL(filename, mydata);
|
||||
SfmData mydata = SfmData::FromBalFile(filename);
|
||||
cout << boost::format("read %1% tracks on %2% cameras\n") %
|
||||
mydata.number_tracks() % mydata.number_cameras();
|
||||
mydata.numberTracks() % mydata.numberCameras();
|
||||
|
||||
// Create a factor graph
|
||||
NonlinearFactorGraph graph;
|
||||
|
@ -131,7 +131,7 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
cout << "Time comparison by solving " << filename << " results:" << endl;
|
||||
cout << boost::format("%1% point tracks and %2% cameras\n") %
|
||||
mydata.number_tracks() % mydata.number_cameras()
|
||||
mydata.numberTracks() % mydata.numberCameras()
|
||||
<< endl;
|
||||
|
||||
tictoc_print_();
|
||||
|
|
|
@@ -22,6 +22,8 @@
* Passing function argument allows to specificy an initial position, a pose increment and step count.
*/

#pragma once

// As this is a full 3D problem, we will use Pose3 variables to represent the camera
// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
@@ -68,10 +68,9 @@ int main(int argc, char** argv) {
<< graph.size() << " factors (Unary+Edge).";

// "Decoding", i.e., configuration with largest value
// We use sequential variable elimination
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
// Uses max-product.
auto optimalDecoding = graph.optimize();
optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n");

// "Inference" Computing marginals for each node
// Here we'll make use of DiscreteMarginals class, which makes use of
@ -50,8 +50,8 @@ int main(int argc, char** argv) {
|
|||
|
||||
// Print the UGM distribution
|
||||
cout << "\nUGM distribution:" << endl;
|
||||
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
||||
Cathy & Heather & Mark & Allison);
|
||||
auto allPosbValues =
|
||||
DiscreteValues::CartesianProduct(Cathy & Heather & Mark & Allison);
|
||||
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||
DiscreteFactor::Values values = allPosbValues[i];
|
||||
double prodPot = graph(values);
|
||||
|
@ -61,10 +61,9 @@ int main(int argc, char** argv) {
|
|||
}
|
||||
|
||||
// "Decoding", i.e., configuration with largest value (MPE)
|
||||
// We use sequential variable elimination
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
|
||||
optimalDecoding->print("\noptimalDecoding");
|
||||
// Uses max-product
|
||||
auto optimalDecoding = graph.optimize();
|
||||
GTSAM_PRINT(optimalDecoding);
|
||||
|
||||
// "Inference" Computing marginals
|
||||
cout << "\nComputing Node Marginals .." << endl;
|
||||
|
|
|
@ -440,7 +440,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
|
|||
EIGEN_DEVICE_FUNC
|
||||
void lazyAssign(const TriangularBase<OtherDerived>& other);
|
||||
|
||||
/** \deprecated */
|
||||
/** @deprecated */
|
||||
template<typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC
|
||||
void lazyAssign(const MatrixBase<OtherDerived>& other);
|
||||
|
@ -523,7 +523,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
|
|||
call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
|
||||
}
|
||||
|
||||
/** \deprecated
|
||||
/** @deprecated
|
||||
* Shortcut for \code (*this).swap(other.triangularView<(*this)::Mode>()) \endcode */
|
||||
template<typename OtherDerived>
|
||||
EIGEN_DEVICE_FUNC
|
||||
|
|
|
@ -5,8 +5,5 @@ install(FILES ${base_headers} DESTINATION include/gtsam/base)
|
|||
file(GLOB base_headers_tree "treeTraversal/*.h")
|
||||
install(FILES ${base_headers_tree} DESTINATION include/gtsam/base/treeTraversal)
|
||||
|
||||
file(GLOB deprecated_headers "deprecated/*.h")
|
||||
install(FILES ${deprecated_headers} DESTINATION include/gtsam/base/deprecated)
|
||||
|
||||
# Build tests
|
||||
add_subdirectory(tests)
|
||||
|
|
|
@ -18,6 +18,10 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <boost/version.hpp>
|
||||
#if BOOST_VERSION >= 107400
|
||||
#include <boost/serialization/library_version_type.hpp>
|
||||
#endif
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/serialization/set.hpp>
|
||||
#include <gtsam/base/FastDefaultAllocator.h>
|
||||
|
|
|
@ -370,4 +370,4 @@ public:
|
|||
* the gtsam namespace to be more easily enforced as testable
|
||||
*/
|
||||
#define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup<T>;
|
||||
#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup<T> _gtsam_IsLieGroup_##T;
|
||||
#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup<T>;
|
||||
|
|
|
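The concept-check macros above only change spelling from typedef to a type alias; the two forms are interchangeable, for example:

// Hedged illustration: the typedef and using spellings declare the same alias.
#include <gtsam/base/Lie.h>
#include <gtsam/geometry/Pose3.h>

typedef gtsam::IsLieGroup<gtsam::Pose3> CheckPose3Typedef;  // old spelling
using CheckPose3Using = gtsam::IsLieGroup<gtsam::Pose3>;    // new spelling

int main() { return 0; }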
@ -1,26 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file LieMatrix.h
|
||||
* @brief External deprecation warning, see deprecated/LieMatrix.h for details
|
||||
* @author Paul Drews
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma message("LieMatrix.h is deprecated. Please use Eigen::Matrix instead.")
|
||||
#else
|
||||
#warning "LieMatrix.h is deprecated. Please use Eigen::Matrix instead."
|
||||
#endif
|
||||
|
||||
#include "gtsam/base/deprecated/LieMatrix.h"
|
|
@ -1,26 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file LieVector.h
|
||||
* @brief Deprecation warning for LieVector, see deprecated/LieVector.h for details.
|
||||
* @author Paul Drews
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma message("LieVector.h is deprecated. Please use Eigen::Vector instead.")
|
||||
#else
|
||||
#warning "LieVector.h is deprecated. Please use Eigen::Vector instead."
|
||||
#endif
|
||||
|
||||
#include <gtsam/base/deprecated/LieVector.h>
|
|
@ -178,4 +178,4 @@ struct FixedDimension {
|
|||
// * the gtsam namespace to be more easily enforced as testable
|
||||
// */
|
||||
#define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold<T>;
|
||||
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold<T> _gtsam_IsManifold_##T;
|
||||
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold<T>;
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
#include <boost/tuple/tuple.hpp>
|
||||
#include <boost/tokenizer.hpp>
|
||||
#include <boost/format.hpp>
|
||||
|
||||
#include <cstdarg>
|
||||
#include <cstring>
|
||||
|
|
|
@ -26,12 +26,9 @@
|
|||
|
||||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/config.h>
|
||||
|
||||
#include <boost/format.hpp>
|
||||
#include <functional>
|
||||
#include <boost/tuple/tuple.hpp>
|
||||
#include <boost/math/special_functions/fpclassify.hpp>
|
||||
|
||||
#include <vector>
|
||||
|
||||
/**
|
||||
* Matrix is a typedef in the gtsam namespace
|
||||
|
@ -46,28 +43,28 @@ typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> M
|
|||
// Create handy typedefs and constants for square-size matrices
|
||||
// MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9
|
||||
#define GTSAM_MAKE_MATRIX_DEFS(N) \
|
||||
typedef Eigen::Matrix<double, N, N> Matrix##N; \
|
||||
typedef Eigen::Matrix<double, 1, N> Matrix1##N; \
|
||||
typedef Eigen::Matrix<double, 2, N> Matrix2##N; \
|
||||
typedef Eigen::Matrix<double, 3, N> Matrix3##N; \
|
||||
typedef Eigen::Matrix<double, 4, N> Matrix4##N; \
|
||||
typedef Eigen::Matrix<double, 5, N> Matrix5##N; \
|
||||
typedef Eigen::Matrix<double, 6, N> Matrix6##N; \
|
||||
typedef Eigen::Matrix<double, 7, N> Matrix7##N; \
|
||||
typedef Eigen::Matrix<double, 8, N> Matrix8##N; \
|
||||
typedef Eigen::Matrix<double, 9, N> Matrix9##N; \
|
||||
using Matrix##N = Eigen::Matrix<double, N, N>; \
|
||||
using Matrix1##N = Eigen::Matrix<double, 1, N>; \
|
||||
using Matrix2##N = Eigen::Matrix<double, 2, N>; \
|
||||
using Matrix3##N = Eigen::Matrix<double, 3, N>; \
|
||||
using Matrix4##N = Eigen::Matrix<double, 4, N>; \
|
||||
using Matrix5##N = Eigen::Matrix<double, 5, N>; \
|
||||
using Matrix6##N = Eigen::Matrix<double, 6, N>; \
|
||||
using Matrix7##N = Eigen::Matrix<double, 7, N>; \
|
||||
using Matrix8##N = Eigen::Matrix<double, 8, N>; \
|
||||
using Matrix9##N = Eigen::Matrix<double, 9, N>; \
|
||||
static const Eigen::MatrixBase<Matrix##N>::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \
|
||||
static const Eigen::MatrixBase<Matrix##N>::ConstantReturnType Z_##N##x##N = Matrix##N::Zero();
|
||||
|
||||
GTSAM_MAKE_MATRIX_DEFS(1);
|
||||
GTSAM_MAKE_MATRIX_DEFS(2);
|
||||
GTSAM_MAKE_MATRIX_DEFS(3);
|
||||
GTSAM_MAKE_MATRIX_DEFS(4);
|
||||
GTSAM_MAKE_MATRIX_DEFS(5);
|
||||
GTSAM_MAKE_MATRIX_DEFS(6);
|
||||
GTSAM_MAKE_MATRIX_DEFS(7);
|
||||
GTSAM_MAKE_MATRIX_DEFS(8);
|
||||
GTSAM_MAKE_MATRIX_DEFS(9);
|
||||
GTSAM_MAKE_MATRIX_DEFS(1)
|
||||
GTSAM_MAKE_MATRIX_DEFS(2)
|
||||
GTSAM_MAKE_MATRIX_DEFS(3)
|
||||
GTSAM_MAKE_MATRIX_DEFS(4)
|
||||
GTSAM_MAKE_MATRIX_DEFS(5)
|
||||
GTSAM_MAKE_MATRIX_DEFS(6)
|
||||
GTSAM_MAKE_MATRIX_DEFS(7)
|
||||
GTSAM_MAKE_MATRIX_DEFS(8)
|
||||
GTSAM_MAKE_MATRIX_DEFS(9)
|
||||
|
||||
// Matrix expressions for accessing parts of matrices
|
||||
typedef Eigen::Block<Matrix> SubMatrix;
|
||||
|
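The GTSAM_MAKE_MATRIX_DEFS change above likewise swaps typedef for using without changing what the macro generates; a hedged sketch of the aliases and constants it provides:

// Hedged sketch: aliases and constants generated by GTSAM_MAKE_MATRIX_DEFS(3) et al.
#include <gtsam/base/Matrix.h>

#include <iostream>

int main() {
  gtsam::Matrix3 R = gtsam::I_3x3;              // Eigen::Matrix<double, 3, 3>, identity
  gtsam::Matrix23 A = gtsam::Matrix23::Zero();  // fixed-size 2x3
  R(0, 1) = 2.0;
  std::cout << R << "\n\n" << A << std::endl;
  return 0;
}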
@ -524,81 +521,3 @@ GTSAM_EXPORT Matrix RtR(const Matrix& A);
|
|||
|
||||
GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
|
||||
} // namespace gtsam
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include <boost/serialization/split_free.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
|
||||
/**
|
||||
* Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
|
||||
*
|
||||
* Eigen supports calling resize() on both static and dynamic matrices.
|
||||
* This allows for a uniform API, with resize having no effect if the static matrix
|
||||
* is already the correct size.
|
||||
* https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
|
||||
*
|
||||
* We use all the Matrix template parameters to ensure wide compatibility.
|
||||
*
|
||||
* eigen_typekit in ROS uses the same code
|
||||
* http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
|
||||
*/
|
||||
|
||||
// split version - sends sizes ahead
|
||||
template<class Archive,
|
||||
typename Scalar_,
|
||||
int Rows_,
|
||||
int Cols_,
|
||||
int Ops_,
|
||||
int MaxRows_,
|
||||
int MaxCols_>
|
||||
void save(Archive & ar,
|
||||
const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||
const unsigned int /*version*/) {
|
||||
const size_t rows = m.rows(), cols = m.cols();
|
||||
ar << BOOST_SERIALIZATION_NVP(rows);
|
||||
ar << BOOST_SERIALIZATION_NVP(cols);
|
||||
ar << make_nvp("data", make_array(m.data(), m.size()));
|
||||
}
|
||||
|
||||
template<class Archive,
|
||||
typename Scalar_,
|
||||
int Rows_,
|
||||
int Cols_,
|
||||
int Ops_,
|
||||
int MaxRows_,
|
||||
int MaxCols_>
|
||||
void load(Archive & ar,
|
||||
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||
const unsigned int /*version*/) {
|
||||
size_t rows, cols;
|
||||
ar >> BOOST_SERIALIZATION_NVP(rows);
|
||||
ar >> BOOST_SERIALIZATION_NVP(cols);
|
||||
m.resize(rows, cols);
|
||||
ar >> make_nvp("data", make_array(m.data(), m.size()));
|
||||
}
|
||||
|
||||
// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
|
||||
template<class Archive,
|
||||
typename Scalar_,
|
||||
int Rows_,
|
||||
int Cols_,
|
||||
int Ops_,
|
||||
int MaxRows_,
|
||||
int MaxCols_>
|
||||
void serialize(Archive & ar,
|
||||
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||
const unsigned int version) {
|
||||
split_free(ar, m, version);
|
||||
}
|
||||
|
||||
// specialized to Matrix for MATLAB wrapper
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
|
||||
split_free(ar, m, version);
|
||||
}
|
||||
|
||||
} // namespace serialization
|
||||
} // namespace boost
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file MatrixSerialization.h
|
||||
* @brief Serialization for matrices
|
||||
* @author Frank Dellaert
|
||||
* @date February 2022
|
||||
*/
|
||||
|
||||
// \callgraph
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/serialization/split_free.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
|
||||
/**
|
||||
* Ref.
|
||||
* https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
|
||||
*
|
||||
* Eigen supports calling resize() on both static and dynamic matrices.
|
||||
* This allows for a uniform API, with resize having no effect if the static
|
||||
* matrix is already the correct size.
|
||||
* https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
|
||||
*
|
||||
* We use all the Matrix template parameters to ensure wide compatibility.
|
||||
*
|
||||
* eigen_typekit in ROS uses the same code
|
||||
* http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
|
||||
*/
|
||||
|
||||
// split version - sends sizes ahead
|
||||
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
|
||||
int MaxRows_, int MaxCols_>
|
||||
void save(
|
||||
Archive& ar,
|
||||
const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
|
||||
const unsigned int /*version*/) {
|
||||
const size_t rows = m.rows(), cols = m.cols();
|
||||
ar << BOOST_SERIALIZATION_NVP(rows);
|
||||
ar << BOOST_SERIALIZATION_NVP(cols);
|
||||
ar << make_nvp("data", make_array(m.data(), m.size()));
|
||||
}
|
||||
|
||||
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
|
||||
int MaxRows_, int MaxCols_>
|
||||
void load(Archive& ar,
|
||||
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
|
||||
const unsigned int /*version*/) {
|
||||
size_t rows, cols;
|
||||
ar >> BOOST_SERIALIZATION_NVP(rows);
|
||||
ar >> BOOST_SERIALIZATION_NVP(cols);
|
||||
m.resize(rows, cols);
|
||||
ar >> make_nvp("data", make_array(m.data(), m.size()));
|
||||
}
|
||||
|
||||
// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
|
||||
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
|
||||
int MaxRows_, int MaxCols_>
|
||||
void serialize(
|
||||
Archive& ar,
|
||||
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
|
||||
const unsigned int version) {
|
||||
split_free(ar, m, version);
|
||||
}
|
||||
|
||||
// specialized to Matrix for MATLAB wrapper
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
|
||||
split_free(ar, m, version);
|
||||
}
|
||||
|
||||
} // namespace serialization
|
||||
} // namespace boost
|
|
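The new header carries the relocated save/load/serialize free functions for Eigen matrices; a hedged round-trip sketch using a boost text archive (header path as introduced by this file):

// Hedged round-trip sketch; the header path follows the new file introduced above.
#include <gtsam/base/MatrixSerialization.h>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

#include <sstream>

int main() {
  const gtsam::Matrix A = gtsam::Matrix::Random(3, 4);
  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << A;  // uses the free save() above
  }
  gtsam::Matrix B;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> B;  // uses the free load() above, resizing B as needed
  }
  return A.isApprox(B) ? 0 : 1;
}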
@ -20,6 +20,8 @@
|
|||
#pragma once
|
||||
#include <gtsam/config.h> // Configuration from CMake
|
||||
#include <Eigen/Dense>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
|
||||
#ifndef OPTIONALJACOBIAN_NOBOOST
|
||||
#include <boost/optional.hpp>
|
||||
|
@ -89,6 +91,31 @@ public:
|
|||
usurp(dynamic.data());
|
||||
}
|
||||
|
||||
/// Constructor that will resize a dynamic matrix (unless already correct)
|
||||
OptionalJacobian(Eigen::MatrixXd* dynamic) :
|
||||
map_(nullptr) {
|
||||
dynamic->resize(Rows, Cols); // no malloc if correct size
|
||||
usurp(dynamic->data());
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Constructor from an Eigen::Ref *value*. Will not usurp if dimension is wrong
|
||||
* @note This is important so we don't overwrite someone else's memory!
|
||||
*/
|
||||
template<class MATRIX>
|
||||
OptionalJacobian(Eigen::Ref<MATRIX> dynamic_ref) :
|
||||
map_(nullptr) {
|
||||
if (dynamic_ref.rows() == Rows && dynamic_ref.cols() == Cols && !dynamic_ref.IsRowMajor) {
|
||||
usurp(dynamic_ref.data());
|
||||
} else {
|
||||
throw std::invalid_argument(
|
||||
std::string("OptionalJacobian called with wrong dimensions or "
|
||||
"storage order.\n"
|
||||
"Expected: ") +
|
||||
"(" + std::to_string(Rows) + ", " + std::to_string(Cols) + ")");
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef OPTIONALJACOBIAN_NOBOOST
|
||||
|
||||
/// Constructor with boost::none just makes empty
|
||||
|
|
|
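The added constructors above accept a dynamic Eigen::MatrixXd (by pointer) or an Eigen::Ref in addition to fixed-size matrices; a hedged sketch with a toy function that optionally fills a 2x3 Jacobian:

// Hedged sketch of the new dynamic-matrix conversions; f() is a toy function.
#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>

#include <iostream>

gtsam::Vector2 f(const gtsam::Vector3& x, gtsam::OptionalJacobian<2, 3> H = boost::none) {
  if (H) *H = (gtsam::Matrix23() << 1, 0, 0, 0, 1, 0).finished();
  return x.head<2>();
}

int main() {
  const gtsam::Vector3 x(1.0, 2.0, 3.0);
  Eigen::MatrixXd J;   // dynamic; resized by the new pointer constructor
  f(x, &J);
  gtsam::Matrix23 Jf;  // fixed-size still works as before
  f(x, Jf);
  std::cout << J << std::endl;
  return 0;
}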
@ -173,4 +173,4 @@ namespace gtsam {
|
|||
* @deprecated please use BOOST_CONCEPT_ASSERT and
|
||||
*/
|
||||
#define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable<T>;
|
||||
#define GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef gtsam::IsTestable<T> _gtsam_Testable_##T;
|
||||
#define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable<T>;
|
||||
|
|
|
@ -80,12 +80,13 @@ bool assert_equal(const V& expected, const boost::optional<const V&>& actual, do
  return assert_equal(expected, *actual, tol);
}

#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
/**
 * Version of assert_equals to work with vectors
 * \deprecated: use container equals instead
 * @deprecated: use container equals instead
 */
template<class V>
bool assert_equal(const std::vector<V>& expected, const std::vector<V>& actual, double tol = 1e-9) {
bool GTSAM_DEPRECATED assert_equal(const std::vector<V>& expected, const std::vector<V>& actual, double tol = 1e-9) {
  bool match = true;
  if (expected.size() != actual.size())
    match = false;

@ -108,6 +109,7 @@ bool assert_equal(const std::vector<V>& expected, const std::vector<V>& actual,
  }
  return true;
}
#endif

/**
 * Function for comparing maps of testable->testable
@ -21,6 +21,7 @@
#include <gtsam/config.h>      // Configuration from CMake

#include <gtsam/base/Vector.h>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <memory>
@ -48,19 +48,19 @@ static const Eigen::MatrixBase<Vector3>::ConstantReturnType Z_3x1 = Vector3::Zero();
// Create handy typedefs and constants for vectors with N>3
// VectorN and Z_Nx1, for N=1..9
#define GTSAM_MAKE_VECTOR_DEFS(N)                \
  typedef Eigen::Matrix<double, N, 1> Vector##N; \
  using Vector##N = Eigen::Matrix<double, N, 1>; \
  static const Eigen::MatrixBase<Vector##N>::ConstantReturnType Z_##N##x1 = Vector##N::Zero();

GTSAM_MAKE_VECTOR_DEFS(4);
GTSAM_MAKE_VECTOR_DEFS(5);
GTSAM_MAKE_VECTOR_DEFS(6);
GTSAM_MAKE_VECTOR_DEFS(7);
GTSAM_MAKE_VECTOR_DEFS(8);
GTSAM_MAKE_VECTOR_DEFS(9);
GTSAM_MAKE_VECTOR_DEFS(10);
GTSAM_MAKE_VECTOR_DEFS(11);
GTSAM_MAKE_VECTOR_DEFS(12);
GTSAM_MAKE_VECTOR_DEFS(15);
GTSAM_MAKE_VECTOR_DEFS(4)
GTSAM_MAKE_VECTOR_DEFS(5)
GTSAM_MAKE_VECTOR_DEFS(6)
GTSAM_MAKE_VECTOR_DEFS(7)
GTSAM_MAKE_VECTOR_DEFS(8)
GTSAM_MAKE_VECTOR_DEFS(9)
GTSAM_MAKE_VECTOR_DEFS(10)
GTSAM_MAKE_VECTOR_DEFS(11)
GTSAM_MAKE_VECTOR_DEFS(12)
GTSAM_MAKE_VECTOR_DEFS(15)

typedef Eigen::VectorBlock<Vector> SubVector;
typedef Eigen::VectorBlock<const Vector> ConstSubVector;
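For reference, the semicolon already lives inside the macro body, so the updated invocation style GTSAM_MAKE_VECTOR_DEFS(6) expands to roughly:

using Vector6 = Eigen::Matrix<double, 6, 1>;
static const Eigen::MatrixBase<Vector6>::ConstantReturnType Z_6x1 = Vector6::Zero();

The old form with a trailing semicolon at the call site therefore left a stray empty declaration after the expansion, which some compilers flag under pedantic warning settings.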
@ -204,18 +204,19 @@ inline double inner_prod(const V1 &a, const V2& b) {
  return a.dot(b);
}

#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
/**
 * BLAS Level 1 scal: x <- alpha*x
 * \deprecated: use operators instead
 * @deprecated: use operators instead
 */
inline void scal(double alpha, Vector& x) { x *= alpha; }
inline void GTSAM_DEPRECATED scal(double alpha, Vector& x) { x *= alpha; }

/**
 * BLAS Level 1 axpy: y <- alpha*x + y
 * \deprecated: use operators instead
 * @deprecated: use operators instead
 */
template<class V1, class V2>
inline void axpy(double alpha, const V1& x, V2& y) {
inline void GTSAM_DEPRECATED axpy(double alpha, const V1& x, V2& y) {
  assert (y.size()==x.size());
  y += alpha * x;
}

@ -223,6 +224,7 @@ inline void axpy(double alpha, const Vector& x, SubVector y) {
  assert (y.size()==x.size());
  y += alpha * x;
}
#endif

/**
 * house(x,j) computes HouseHolder vector v and scaling factor beta
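As the deprecation comments say, plain Eigen operators replace these BLAS-style helpers; a minimal equivalent (not from the diff):

#include <gtsam/base/Vector.h>

void operatorEquivalents() {
  gtsam::Vector x = gtsam::Vector3(1.0, 2.0, 3.0);
  gtsam::Vector y = gtsam::Vector3(4.0, 5.0, 6.0);
  x *= 2.0;      // replaces scal(2.0, x)
  y += 0.5 * x;  // replaces axpy(0.5, x, y)
}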
@ -264,45 +266,3 @@ GTSAM_EXPORT Vector concatVectors(const std::list<Vector>& vs);
|
|||
*/
|
||||
GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...);
|
||||
} // namespace gtsam
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include <boost/serialization/split_free.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
|
||||
// split version - copies into an STL vector for serialization
|
||||
template<class Archive>
|
||||
void save(Archive & ar, const gtsam::Vector & v, unsigned int /*version*/) {
|
||||
const size_t size = v.size();
|
||||
ar << BOOST_SERIALIZATION_NVP(size);
|
||||
ar << make_nvp("data", make_array(v.data(), v.size()));
|
||||
}
|
||||
|
||||
template<class Archive>
|
||||
void load(Archive & ar, gtsam::Vector & v, unsigned int /*version*/) {
|
||||
size_t size;
|
||||
ar >> BOOST_SERIALIZATION_NVP(size);
|
||||
v.resize(size);
|
||||
ar >> make_nvp("data", make_array(v.data(), v.size()));
|
||||
}
|
||||
|
||||
// split version - copies into an STL vector for serialization
|
||||
template<class Archive, int D>
|
||||
void save(Archive & ar, const Eigen::Matrix<double,D,1> & v, unsigned int /*version*/) {
|
||||
ar << make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
|
||||
}
|
||||
|
||||
template<class Archive, int D>
|
||||
void load(Archive & ar, Eigen::Matrix<double,D,1> & v, unsigned int /*version*/) {
|
||||
ar >> make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
|
||||
}
|
||||
|
||||
} // namespace serialization
|
||||
} // namespace boost
|
||||
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector2)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector3)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector6)
|
||||
|
|
|
@ -0,0 +1,65 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file VectorSerialization.h
 * @brief serialization for Vectors
 * @author Frank Dellaert
 * @date February 2022
 */

#pragma once

#include <gtsam/base/Vector.h>

#include <boost/serialization/array.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

// split version - copies into an STL vector for serialization
template <class Archive>
void save(Archive& ar, const gtsam::Vector& v, unsigned int /*version*/) {
  const size_t size = v.size();
  ar << BOOST_SERIALIZATION_NVP(size);
  ar << make_nvp("data", make_array(v.data(), v.size()));
}

template <class Archive>
void load(Archive& ar, gtsam::Vector& v, unsigned int /*version*/) {
  size_t size;
  ar >> BOOST_SERIALIZATION_NVP(size);
  v.resize(size);
  ar >> make_nvp("data", make_array(v.data(), v.size()));
}

// split version - copies into an STL vector for serialization
template <class Archive, int D>
void save(Archive& ar, const Eigen::Matrix<double, D, 1>& v,
          unsigned int /*version*/) {
  ar << make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
}

template <class Archive, int D>
void load(Archive& ar, Eigen::Matrix<double, D, 1>& v,
          unsigned int /*version*/) {
  ar >> make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
}

}  // namespace serialization
}  // namespace boost

BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector2)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector3)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector6)
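For context, a hedged round-trip sketch (not part of the diff) of the new header in action; the XML archive types are standard Boost classes.

#include <gtsam/base/VectorSerialization.h>
#include <boost/archive/xml_iarchive.hpp>
#include <boost/archive/xml_oarchive.hpp>
#include <sstream>

bool roundTripVector() {
  gtsam::Vector v = gtsam::Vector3(1.0, 2.0, 3.0);
  std::stringstream ss;
  {
    boost::archive::xml_oarchive oa(ss);
    oa << boost::serialization::make_nvp("v", v);  // uses save() above
  }
  gtsam::Vector restored;
  {
    boost::archive::xml_iarchive ia(ss);
    ia >> boost::serialization::make_nvp("v", restored);  // uses load() above
  }
  return restored.isApprox(v);
}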
@ -18,6 +18,7 @@
#pragma once

#include <gtsam/base/Matrix.h>
#include <gtsam/base/MatrixSerialization.h>
#include <gtsam/base/FastVector.h>

namespace gtsam {
@ -38,7 +38,7 @@ class DSFMap {
  DSFMap();
  KEY find(const KEY& key) const;
  void merge(const KEY& x, const KEY& y);
  std::map<KEY, Set> sets();
  std::map<KEY, This::Set> sets();
};

class IndexPairSet {

@ -82,6 +82,7 @@ class IndexPairSetMap {
};

#include <gtsam/base/Matrix.h>
#include <gtsam/base/MatrixSerialization.h>
bool linear_independent(Matrix A, Matrix B, double tol);

#include <gtsam/base/Value.h>
@ -32,7 +32,7 @@ void testDefaultChart(TestResult& result_,
                      const std::string& name_,
                      const T& value) {

  GTSAM_CONCEPT_TESTABLE_TYPE(T);
  GTSAM_CONCEPT_TESTABLE_TYPE(T)

  typedef typename gtsam::DefaultChart<T> Chart;
  typedef typename Chart::vector Vector;
@ -18,7 +18,6 @@
#pragma once

#include <gtsam/base/Matrix.h>
#include <boost/shared_ptr.hpp>

namespace gtsam {
|
|
@ -1,152 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file LieMatrix.h
|
||||
* @brief A wrapper around Matrix providing Lie compatibility
|
||||
* @author Richard Roberts and Alex Cunningham
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdarg>
|
||||
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @deprecated: LieMatrix, LieVector and LieMatrix are obsolete in GTSAM 4.0 as
|
||||
* we can directly add double, Vector, and Matrix into values now, because of
|
||||
* gtsam::traits.
|
||||
*/
|
||||
struct LieMatrix : public Matrix {
|
||||
|
||||
/// @name Constructors
|
||||
/// @{
|
||||
enum { dimension = Eigen::Dynamic };
|
||||
|
||||
/** default constructor - only for serialize */
|
||||
LieMatrix() {}
|
||||
|
||||
/** initialize from a normal matrix */
|
||||
LieMatrix(const Matrix& v) : Matrix(v) {}
|
||||
|
||||
template <class M>
|
||||
LieMatrix(const M& v) : Matrix(v) {}
|
||||
|
||||
// Currently TMP constructor causes ICE on MSVS 2013
|
||||
#if (_MSC_VER < 1800)
|
||||
/** initialize from a fixed size normal vector */
|
||||
template<int M, int N>
|
||||
LieMatrix(const Eigen::Matrix<double, M, N>& v) : Matrix(v) {}
|
||||
#endif
|
||||
|
||||
/** constructor with size and initial data, row order ! */
|
||||
LieMatrix(size_t m, size_t n, const double* const data) :
|
||||
Matrix(Eigen::Map<const Matrix>(data, m, n)) {}
|
||||
|
||||
/// @}
|
||||
/// @name Testable interface
|
||||
/// @{
|
||||
|
||||
/** print @param s optional string naming the object */
|
||||
void print(const std::string& name = "") const {
|
||||
gtsam::print(matrix(), name);
|
||||
}
|
||||
/** equality up to tolerance */
|
||||
inline bool equals(const LieMatrix& expected, double tol=1e-5) const {
|
||||
return gtsam::equal_with_abs_tol(matrix(), expected.matrix(), tol);
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/** get the underlying matrix */
|
||||
inline Matrix matrix() const {
|
||||
return static_cast<Matrix>(*this);
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
/// @name Group
|
||||
/// @{
|
||||
LieMatrix compose(const LieMatrix& q) { return (*this)+q;}
|
||||
LieMatrix between(const LieMatrix& q) { return q-(*this);}
|
||||
LieMatrix inverse() { return -(*this);}
|
||||
/// @}
|
||||
|
||||
/// @name Manifold
|
||||
/// @{
|
||||
Vector localCoordinates(const LieMatrix& q) { return between(q).vector();}
|
||||
LieMatrix retract(const Vector& v) {return compose(LieMatrix(v));}
|
||||
/// @}
|
||||
|
||||
/// @name Lie Group
|
||||
/// @{
|
||||
static Vector Logmap(const LieMatrix& p) {return p.vector();}
|
||||
static LieMatrix Expmap(const Vector& v) { return LieMatrix(v);}
|
||||
/// @}
|
||||
|
||||
/// @name VectorSpace requirements
|
||||
/// @{
|
||||
|
||||
/** Returns dimensionality of the tangent space */
|
||||
inline size_t dim() const { return size(); }
|
||||
|
||||
/** Convert to vector, is done row-wise - TODO why? */
|
||||
inline Vector vector() const {
|
||||
Vector result(size());
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic,
|
||||
Eigen::RowMajor> RowMajor;
|
||||
Eigen::Map<RowMajor>(&result(0), rows(), cols()) = *this;
|
||||
return result;
|
||||
}
|
||||
|
||||
/** identity - NOTE: no known size at compile time - so zero length */
|
||||
inline static LieMatrix identity() {
|
||||
throw std::runtime_error("LieMatrix::identity(): Don't use this function");
|
||||
return LieMatrix();
|
||||
}
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
||||
// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template<class Archive>
|
||||
void serialize(Archive & ar, const unsigned int /*version*/) {
|
||||
ar & boost::serialization::make_nvp("Matrix",
|
||||
boost::serialization::base_object<Matrix>(*this));
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
template<>
|
||||
struct traits<LieMatrix> : public internal::VectorSpace<LieMatrix> {
|
||||
|
||||
// Override Retract, as the default version does not know how to initialize
|
||||
static LieMatrix Retract(const LieMatrix& origin, const TangentVector& v,
|
||||
ChartJacobian H1 = boost::none, ChartJacobian H2 = boost::none) {
|
||||
if (H1) *H1 = Eye(origin);
|
||||
if (H2) *H2 = Eye(origin);
|
||||
typedef const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic,
|
||||
Eigen::RowMajor> RowMajor;
|
||||
return origin + Eigen::Map<RowMajor>(&v(0), origin.rows(), origin.cols());
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
} // \namespace gtsam
|
|
@ -1,88 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file LieScalar.h
|
||||
* @brief A wrapper around scalar providing Lie compatibility
|
||||
* @author Kai Ni
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/dllexport.h>
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <iostream>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @deprecated: LieScalar, LieVector and LieMatrix are obsolete in GTSAM 4.0 as
|
||||
* we can directly add double, Vector, and Matrix into values now, because of
|
||||
* gtsam::traits.
|
||||
*/
|
||||
struct LieScalar {
|
||||
|
||||
enum { dimension = 1 };
|
||||
|
||||
/** default constructor */
|
||||
LieScalar() : d_(0.0) {}
|
||||
|
||||
/** wrap a double */
|
||||
/*explicit*/ LieScalar(double d) : d_(d) {}
|
||||
|
||||
/** access the underlying value */
|
||||
double value() const { return d_; }
|
||||
|
||||
/** Automatic conversion to underlying value */
|
||||
operator double() const { return d_; }
|
||||
|
||||
/** convert vector */
|
||||
Vector1 vector() const { Vector1 v; v<<d_; return v; }
|
||||
|
||||
/// @name Testable
|
||||
/// @{
|
||||
void print(const std::string& name = "") const {
|
||||
std::cout << name << ": " << d_ << std::endl;
|
||||
}
|
||||
bool equals(const LieScalar& expected, double tol = 1e-5) const {
|
||||
return std::abs(expected.d_ - d_) <= tol;
|
||||
}
|
||||
/// @}
|
||||
|
||||
/// @name Group
|
||||
/// @{
|
||||
static LieScalar identity() { return LieScalar(0);}
|
||||
LieScalar compose(const LieScalar& q) { return (*this)+q;}
|
||||
LieScalar between(const LieScalar& q) { return q-(*this);}
|
||||
LieScalar inverse() { return -(*this);}
|
||||
/// @}
|
||||
|
||||
/// @name Manifold
|
||||
/// @{
|
||||
size_t dim() const { return 1; }
|
||||
Vector1 localCoordinates(const LieScalar& q) { return between(q).vector();}
|
||||
LieScalar retract(const Vector1& v) {return compose(LieScalar(v[0]));}
|
||||
/// @}
|
||||
|
||||
/// @name Lie Group
|
||||
/// @{
|
||||
static Vector1 Logmap(const LieScalar& p) { return p.vector();}
|
||||
static LieScalar Expmap(const Vector1& v) { return LieScalar(v[0]);}
|
||||
/// @}
|
||||
|
||||
private:
|
||||
double d_;
|
||||
};
|
||||
|
||||
template<>
|
||||
struct traits<LieScalar> : public internal::ScalarTraits<LieScalar> {};
|
||||
|
||||
} // \namespace gtsam
|
|
@ -1,121 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file LieVector.h
|
||||
* @brief A wrapper around vector providing Lie compatibility
|
||||
* @author Alex Cunningham
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/VectorSpace.h>
|
||||
#include <cstdarg>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @deprecated: LieVector, LieVector and LieMatrix are obsolete in GTSAM 4.0 as
|
||||
* we can directly add double, Vector, and Matrix into values now, because of
|
||||
* gtsam::traits.
|
||||
*/
|
||||
struct LieVector : public Vector {
|
||||
|
||||
enum { dimension = Eigen::Dynamic };
|
||||
|
||||
/** default constructor - should be unnecessary */
|
||||
LieVector() {}
|
||||
|
||||
/** initialize from a normal vector */
|
||||
LieVector(const Vector& v) : Vector(v) {}
|
||||
|
||||
template <class V>
|
||||
LieVector(const V& v) : Vector(v) {}
|
||||
|
||||
// Currently TMP constructor causes ICE on MSVS 2013
|
||||
#if (_MSC_VER < 1800)
|
||||
/** initialize from a fixed size normal vector */
|
||||
template<int N>
|
||||
LieVector(const Eigen::Matrix<double, N, 1>& v) : Vector(v) {}
|
||||
#endif
|
||||
|
||||
/** wrap a double */
|
||||
LieVector(double d) : Vector((Vector(1) << d).finished()) {}
|
||||
|
||||
/** constructor with size and initial data, row order ! */
|
||||
LieVector(size_t m, const double* const data) : Vector(m) {
|
||||
for (size_t i = 0; i < m; i++) (*this)(i) = data[i];
|
||||
}
|
||||
|
||||
/// @name Testable
|
||||
/// @{
|
||||
void print(const std::string& name="") const {
|
||||
gtsam::print(vector(), name);
|
||||
}
|
||||
bool equals(const LieVector& expected, double tol=1e-5) const {
|
||||
return gtsam::equal(vector(), expected.vector(), tol);
|
||||
}
|
||||
/// @}
|
||||
|
||||
/// @name Group
|
||||
/// @{
|
||||
LieVector compose(const LieVector& q) { return (*this)+q;}
|
||||
LieVector between(const LieVector& q) { return q-(*this);}
|
||||
LieVector inverse() { return -(*this);}
|
||||
/// @}
|
||||
|
||||
/// @name Manifold
|
||||
/// @{
|
||||
Vector localCoordinates(const LieVector& q) { return between(q).vector();}
|
||||
LieVector retract(const Vector& v) {return compose(LieVector(v));}
|
||||
/// @}
|
||||
|
||||
/// @name Lie Group
|
||||
/// @{
|
||||
static Vector Logmap(const LieVector& p) {return p.vector();}
|
||||
static LieVector Expmap(const Vector& v) { return LieVector(v);}
|
||||
/// @}
|
||||
|
||||
/// @name VectorSpace requirements
|
||||
/// @{
|
||||
|
||||
/** get the underlying vector */
|
||||
Vector vector() const {
|
||||
return static_cast<Vector>(*this);
|
||||
}
|
||||
|
||||
/** Returns dimensionality of the tangent space */
|
||||
size_t dim() const { return this->size(); }
|
||||
|
||||
/** identity - NOTE: no known size at compile time - so zero length */
|
||||
static LieVector identity() {
|
||||
throw std::runtime_error("LieVector::identity(): Don't use this function");
|
||||
return LieVector();
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
||||
// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template<class Archive>
|
||||
void serialize(Archive & ar, const unsigned int /*version*/) {
|
||||
ar & boost::serialization::make_nvp("Vector",
|
||||
boost::serialization::base_object<Vector>(*this));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template<>
|
||||
struct traits<LieVector> : public internal::VectorSpace<LieVector> {};
|
||||
|
||||
} // \namespace gtsam
|
|
@ -19,11 +19,13 @@

#pragma once

#include <sstream>
#include <Eigen/Core>
#include <fstream>
#include <sstream>
#include <string>

// includes for standard serialization types
#include <boost/serialization/version.hpp>
#include <boost/serialization/optional.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>

@ -40,6 +42,17 @@
#include <boost/archive/binary_oarchive.hpp>
#include <boost/serialization/export.hpp>

// Workaround a bug in GCC >= 7 and C++17
// ref. https://gitlab.com/libeigen/eigen/-/issues/1676
#ifdef __GNUC__
#if __GNUC__ >= 7 && __cplusplus >= 201703L
namespace boost { namespace serialization { struct U; } }
namespace Eigen { namespace internal {
template<> struct traits<boost::serialization::U> {enum {Flags=0};};
} }
#endif
#endif

namespace gtsam {

/** @name Standard serialization
@ -42,7 +42,7 @@ T create() {
}

// Creates or empties a folder in the build folder and returns the relative path
boost::filesystem::path resetFilesystem(
inline boost::filesystem::path resetFilesystem(
    boost::filesystem::path folder = "actual") {
  boost::filesystem::remove_all(folder);
  boost::filesystem::create_directory(folder);
|
|
@ -1,70 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testLieMatrix.cpp
|
||||
* @author Richard Roberts
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/deprecated/LieMatrix.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
GTSAM_CONCEPT_TESTABLE_INST(LieMatrix)
|
||||
GTSAM_CONCEPT_LIE_INST(LieMatrix)
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( LieMatrix, construction ) {
|
||||
Matrix m = (Matrix(2,2) << 1.0,2.0, 3.0,4.0).finished();
|
||||
LieMatrix lie1(m), lie2(m);
|
||||
|
||||
EXPECT(traits<LieMatrix>::GetDimension(m) == 4);
|
||||
EXPECT(assert_equal(m, lie1.matrix()));
|
||||
EXPECT(assert_equal(lie1, lie2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( LieMatrix, other_constructors ) {
|
||||
Matrix init = (Matrix(2,2) << 10.0,20.0, 30.0,40.0).finished();
|
||||
LieMatrix exp(init);
|
||||
double data[] = {10,30,20,40};
|
||||
LieMatrix b(2,2,data);
|
||||
EXPECT(assert_equal(exp, b));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(LieMatrix, retract) {
|
||||
LieMatrix init((Matrix(2,2) << 1.0,2.0,3.0,4.0).finished());
|
||||
Vector update = (Vector(4) << 3.0, 4.0, 6.0, 7.0).finished();
|
||||
|
||||
LieMatrix expected((Matrix(2,2) << 4.0, 6.0, 9.0, 11.0).finished());
|
||||
LieMatrix actual = traits<LieMatrix>::Retract(init,update);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
Vector expectedUpdate = update;
|
||||
Vector actualUpdate = traits<LieMatrix>::Local(init,actual);
|
||||
|
||||
EXPECT(assert_equal(expectedUpdate, actualUpdate));
|
||||
|
||||
Vector expectedLogmap = (Vector(4) << 1, 2, 3, 4).finished();
|
||||
Vector actualLogmap = traits<LieMatrix>::Logmap(LieMatrix((Matrix(2,2) << 1.0, 2.0, 3.0, 4.0).finished()));
|
||||
EXPECT(assert_equal(expectedLogmap, actualLogmap));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
|
||||
/* ************************************************************************* */
|
||||
|
||||
|
|
@ -1,64 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testLieScalar.cpp
|
||||
* @author Kai Ni
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/deprecated/LieScalar.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
GTSAM_CONCEPT_TESTABLE_INST(LieScalar)
|
||||
GTSAM_CONCEPT_LIE_INST(LieScalar)
|
||||
|
||||
const double tol=1e-9;
|
||||
|
||||
//******************************************************************************
|
||||
TEST(LieScalar , Concept) {
|
||||
BOOST_CONCEPT_ASSERT((IsGroup<LieScalar>));
|
||||
BOOST_CONCEPT_ASSERT((IsManifold<LieScalar>));
|
||||
BOOST_CONCEPT_ASSERT((IsLieGroup<LieScalar>));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(LieScalar , Invariants) {
|
||||
LieScalar lie1(2), lie2(3);
|
||||
CHECK(check_group_invariants(lie1, lie2));
|
||||
CHECK(check_manifold_invariants(lie1, lie2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( testLieScalar, construction ) {
|
||||
double d = 2.;
|
||||
LieScalar lie1(d), lie2(d);
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(2., lie1.value(),tol);
|
||||
EXPECT_DOUBLES_EQUAL(2., lie2.value(),tol);
|
||||
EXPECT(traits<LieScalar>::dimension == 1);
|
||||
EXPECT(assert_equal(lie1, lie2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( testLieScalar, localCoordinates ) {
|
||||
LieScalar lie1(1.), lie2(3.);
|
||||
|
||||
Vector1 actual = traits<LieScalar>::Local(lie1, lie2);
|
||||
EXPECT( assert_equal((Vector)(Vector(1) << 2).finished(), actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
|
||||
/* ************************************************************************* */
|
|
@ -1,66 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testLieVector.cpp
|
||||
* @author Alex Cunningham
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/deprecated/LieVector.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
GTSAM_CONCEPT_TESTABLE_INST(LieVector)
|
||||
GTSAM_CONCEPT_LIE_INST(LieVector)
|
||||
|
||||
//******************************************************************************
|
||||
TEST(LieVector , Concept) {
|
||||
BOOST_CONCEPT_ASSERT((IsGroup<LieVector>));
|
||||
BOOST_CONCEPT_ASSERT((IsManifold<LieVector>));
|
||||
BOOST_CONCEPT_ASSERT((IsLieGroup<LieVector>));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(LieVector , Invariants) {
|
||||
Vector v = Vector3(1.0, 2.0, 3.0);
|
||||
LieVector lie1(v), lie2(v);
|
||||
check_manifold_invariants(lie1, lie2);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( testLieVector, construction ) {
|
||||
Vector v = Vector3(1.0, 2.0, 3.0);
|
||||
LieVector lie1(v), lie2(v);
|
||||
|
||||
EXPECT(lie1.dim() == 3);
|
||||
EXPECT(assert_equal(v, lie1.vector()));
|
||||
EXPECT(assert_equal(lie1, lie2));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( testLieVector, other_constructors ) {
|
||||
Vector init = Vector2(10.0, 20.0);
|
||||
LieVector exp(init);
|
||||
double data[] = { 10, 20 };
|
||||
LieVector b(2, data);
|
||||
EXPECT(assert_equal(exp, b));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -173,7 +173,7 @@ TEST(Matrix, stack )
{
  Matrix A = (Matrix(2, 2) << -5.0, 3.0, 00.0, -5.0).finished();
  Matrix B = (Matrix(3, 2) << -0.5, 2.1, 1.1, 3.4, 2.6, 7.1).finished();
  Matrix AB = stack(2, &A, &B);
  Matrix AB = gtsam::stack(2, &A, &B);
  Matrix C(5, 2);
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++)

@ -187,7 +187,7 @@ TEST(Matrix, stack )
  std::vector<gtsam::Matrix> matrices;
  matrices.push_back(A);
  matrices.push_back(B);
  Matrix AB2 = stack(matrices);
  Matrix AB2 = gtsam::stack(matrices);
  EQUALITY(C,AB2);
}
@ -24,40 +24,33 @@ using namespace std;
|
|||
using namespace gtsam;
|
||||
|
||||
//******************************************************************************
|
||||
#define TEST_CONSTRUCTOR(DIM1, DIM2, X, TRUTHY) \
|
||||
{ \
|
||||
OptionalJacobian<DIM1, DIM2> H(X); \
|
||||
EXPECT(H == TRUTHY); \
|
||||
}
|
||||
TEST( OptionalJacobian, Constructors ) {
|
||||
Matrix23 fixed;
|
||||
|
||||
OptionalJacobian<2, 3> H1;
|
||||
EXPECT(!H1);
|
||||
|
||||
OptionalJacobian<2, 3> H2(fixed);
|
||||
EXPECT(H2);
|
||||
|
||||
OptionalJacobian<2, 3> H3(&fixed);
|
||||
EXPECT(H3);
|
||||
|
||||
Matrix dynamic;
|
||||
OptionalJacobian<2, 3> H4(dynamic);
|
||||
EXPECT(H4);
|
||||
|
||||
OptionalJacobian<2, 3> H5(boost::none);
|
||||
EXPECT(!H5);
|
||||
|
||||
boost::optional<Matrix&> optional(dynamic);
|
||||
OptionalJacobian<2, 3> H6(optional);
|
||||
EXPECT(H6);
|
||||
|
||||
OptionalJacobian<2, 3> H;
|
||||
EXPECT(!H);
|
||||
|
||||
TEST_CONSTRUCTOR(2, 3, fixed, true);
|
||||
TEST_CONSTRUCTOR(2, 3, &fixed, true);
|
||||
TEST_CONSTRUCTOR(2, 3, dynamic, true);
|
||||
TEST_CONSTRUCTOR(2, 3, &dynamic, true);
|
||||
TEST_CONSTRUCTOR(2, 3, boost::none, false);
|
||||
TEST_CONSTRUCTOR(2, 3, optional, true);
|
||||
|
||||
// Test dynamic
|
||||
OptionalJacobian<-1, -1> H7;
|
||||
EXPECT(!H7);
|
||||
|
||||
OptionalJacobian<-1, -1> H8(dynamic);
|
||||
EXPECT(H8);
|
||||
|
||||
OptionalJacobian<-1, -1> H9(boost::none);
|
||||
EXPECT(!H9);
|
||||
|
||||
OptionalJacobian<-1, -1> H10(optional);
|
||||
EXPECT(H10);
|
||||
TEST_CONSTRUCTOR(-1, -1, dynamic, true);
|
||||
TEST_CONSTRUCTOR(-1, -1, boost::none, false);
|
||||
TEST_CONSTRUCTOR(-1, -1, optional, true);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
@ -101,6 +94,25 @@ TEST( OptionalJacobian, Fixed) {
|
|||
dynamic2.setOnes();
|
||||
test(dynamic2);
|
||||
EXPECT(assert_equal(kTestMatrix, dynamic2));
|
||||
|
||||
{ // Dynamic pointer
|
||||
// Passing in an empty matrix means we want it resized
|
||||
Matrix dynamic0;
|
||||
test(&dynamic0);
|
||||
EXPECT(assert_equal(kTestMatrix, dynamic0));
|
||||
|
||||
// Dynamic wrong size
|
||||
Matrix dynamic1(3, 5);
|
||||
dynamic1.setOnes();
|
||||
test(&dynamic1);
|
||||
EXPECT(assert_equal(kTestMatrix, dynamic1));
|
||||
|
||||
// Dynamic right size
|
||||
Matrix dynamic2(2, 5);
|
||||
dynamic2.setOnes();
|
||||
test(&dynamic2);
|
||||
EXPECT(assert_equal(kTestMatrix, dynamic2));
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <gtsam/inference/Key.h>
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/MatrixSerialization.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/base/FastList.h>
|
||||
#include <gtsam/base/FastMap.h>
|
||||
|
|
|
@ -1,35 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testTestableAssertions
|
||||
* @author Alex Cunningham
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/deprecated/LieScalar.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( testTestableAssertions, optional ) {
|
||||
typedef boost::optional<LieScalar> OptionalScalar;
|
||||
LieScalar x(1.0);
|
||||
OptionalScalar ox(x), dummy = boost::none;
|
||||
EXPECT(assert_equal(ox, ox));
|
||||
EXPECT(assert_equal(x, ox));
|
||||
EXPECT(assert_equal(dummy, dummy));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
|
||||
/* ************************************************************************* */
|
|
@ -220,8 +220,8 @@ TEST(Vector, axpy )
|
|||
Vector x = Vector3(10., 20., 30.);
|
||||
Vector y0 = Vector3(2.0, 5.0, 6.0);
|
||||
Vector y1 = y0, y2 = y0;
|
||||
axpy(0.1,x,y1);
|
||||
axpy(0.1,x,y2.head(3));
|
||||
y1 += 0.1 * x;
|
||||
y2.head(3) += 0.1 * x;
|
||||
Vector expected = Vector3(3.0, 7.0, 9.0);
|
||||
EXPECT(assert_equal(expected,y1));
|
||||
EXPECT(assert_equal(expected,Vector(y2)));
|
||||
|
|
|
@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
  // Typedefs
  typedef typename FOREST::Node Node;

  tbb::task::spawn_root_and_wait(
      internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
                                     visitorPost, problemSizeThreshold));
                                     visitorPost, problemSizeThreshold);
#else
  DepthFirstForest(forest, rootData, visitorPre, visitorPost);
#endif
@ -22,7 +22,7 @@
#include <boost/make_shared.hpp>

#ifdef GTSAM_USE_TBB
#include <tbb/task.h>                // tbb::task, tbb::task_list
#include <tbb/task_group.h>          // tbb::task_group
#include <tbb/scalable_allocator.h>  // tbb::scalable_allocator

namespace gtsam {

@ -34,7 +34,7 @@ namespace gtsam {

  /* ************************************************************************* */
  template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
  class PreOrderTask : public tbb::task
  class PreOrderTask
  {
  public:
    const boost::shared_ptr<NODE>& treeNode;

@ -42,28 +42,30 @@ namespace gtsam {
    VISITOR_PRE& visitorPre;
    VISITOR_POST& visitorPost;
    int problemSizeThreshold;
    tbb::task_group& tg;
    bool makeNewTasks;

    bool isPostOrderPhase;
    // Keep track of order phase across multiple calls to the same functor
    mutable bool isPostOrderPhase;

    PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
                 VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
                 bool makeNewTasks = true)
                 tbb::task_group& tg, bool makeNewTasks = true)
        : treeNode(treeNode),
          myData(myData),
          visitorPre(visitorPre),
          visitorPost(visitorPost),
          problemSizeThreshold(problemSizeThreshold),
          tg(tg),
          makeNewTasks(makeNewTasks),
          isPostOrderPhase(false) {}

    tbb::task* execute() override
    void operator()() const
    {
      if(isPostOrderPhase)
      {
        // Run the post-order visitor since this task was recycled to run the post-order visitor
        (void) visitorPost(treeNode, *myData);
        return nullptr;
      }
      else
      {

@ -71,14 +73,10 @@ namespace gtsam {
        {
          if(!treeNode->children.empty())
          {
            // Allocate post-order task as a continuation
            isPostOrderPhase = true;
            recycle_as_continuation();

            bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);

            tbb::task* firstChild = 0;
            tbb::task_list childTasks;
            // If we have child tasks, start subtasks and wait for them to complete
            tbb::task_group ctg;
            for(const boost::shared_ptr<NODE>& child: treeNode->children)
            {
              // Process child in a subtask.  Important:  Run visitorPre before calling

@ -86,37 +84,30 @@ namespace gtsam {
              // allocated an extra child, this causes a TBB error.
              boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
                  tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
              tbb::task* childTask =
                  new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
                                                      problemSizeThreshold, overThreshold);
              if (firstChild)
                childTasks.push_back(*childTask);
              else
                firstChild = childTask;
              ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
                                   problemSizeThreshold, ctg, overThreshold));
            }
            ctg.wait();

            // If we have child tasks, start subtasks and wait for them to complete
            set_ref_count((int)treeNode->children.size());
            spawn(childTasks);
            return firstChild;
            // Allocate post-order task as a continuation
            isPostOrderPhase = true;
            tg.run(*this);
          }
          else
          {
            // Run the post-order visitor in this task if we have no children
            (void) visitorPost(treeNode, *myData);
            return nullptr;
          }
        }
        else
        {
          // Process this node and its children in this task
          processNodeRecursively(treeNode, *myData);
          return nullptr;
        }
      }
    }

    void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData)
    void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
    {
      for(const boost::shared_ptr<NODE>& child: node->children)
      {

@ -131,7 +122,7 @@ namespace gtsam {

  /* ************************************************************************* */
  template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
  class RootTask : public tbb::task
  class RootTask
  {
  public:
    const ROOTS& roots;

@ -139,37 +130,30 @@ namespace gtsam {
    VISITOR_PRE& visitorPre;
    VISITOR_POST& visitorPost;
    int problemSizeThreshold;
    tbb::task_group& tg;
    RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
             int problemSizeThreshold) :
             int problemSizeThreshold, tbb::task_group& tg) :
      roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
      problemSizeThreshold(problemSizeThreshold) {}
      problemSizeThreshold(problemSizeThreshold), tg(tg) {}

    tbb::task* execute() override
    void operator()() const
    {
      typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
      // Create data and tasks for our children
      tbb::task_list tasks;
      for(const boost::shared_ptr<NODE>& root: roots)
      {
        boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
        tasks.push_back(*new(allocate_child())
            PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
        tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
      }
      // Set TBB ref count
      set_ref_count(1 + (int) roots.size());
      // Spawn tasks
      spawn_and_wait_for_all(tasks);
      // Return nullptr
      return nullptr;
    }
  };

  template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
  RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>&
  CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
  void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
  {
    typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
    return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold);
    tbb::task_group tg;
    tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
  }

  }
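The refactor above replaces the removed low-level tbb::task/tbb::task_list API with tbb::task_group, whose run()/wait() calls are the standard oneTBB interface. A minimal stand-alone sketch of that pattern (illustrative only, not GTSAM code):

#include <tbb/task_group.h>
#include <cstdio>
#include <vector>

void processAll(const std::vector<int>& items) {
  tbb::task_group tg;
  for (int item : items) {
    tg.run([item] {               // spawn one task per item, like PreOrderTask above
      std::printf("%d\n", item);
    });
  }
  tg.wait();                      // block until every spawned task has finished
}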
@ -34,6 +34,14 @@
#include <tbb/scalable_allocator.h>
#endif

#if defined(__GNUC__) || defined(__clang__)
#define GTSAM_DEPRECATED __attribute__((deprecated))
#elif defined(_MSC_VER)
#define GTSAM_DEPRECATED __declspec(deprecated)
#else
#define GTSAM_DEPRECATED
#endif

#ifdef GTSAM_USE_EIGEN_MKL_OPENMP
#include <omp.h>
#endif
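A small, hypothetical example (not from the diff) of applying the macro defined above:

inline double GTSAM_DEPRECATED timesTwo(double x) { return 2.0 * x; }
// Callers still compile; GCC/Clang emit -Wdeprecated-declarations and MSVC emits
// C4996, so deprecated GTSAM APIs that stay enabled via
// GTSAM_ALLOW_DEPRECATED_SINCE_V42=ON now warn instead of passing silently.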
@ -0,0 +1,13 @@
#include <gtsam/base/utilities.h>

namespace gtsam {

std::string RedirectCout::str() const {
  return ssBuffer_.str();
}

RedirectCout::~RedirectCout() {
  std::cout.rdbuf(coutBuffer_);
}

}
@ -1,5 +1,9 @@
#pragma once

#include <string>
#include <iostream>
#include <sstream>

namespace gtsam {
/**
 * For Python __str__().

@ -12,14 +16,10 @@ struct RedirectCout {
  RedirectCout() : ssBuffer_(), coutBuffer_(std::cout.rdbuf(ssBuffer_.rdbuf())) {}

  /// return the string
  std::string str() const {
    return ssBuffer_.str();
  }
  std::string str() const;

  /// destructor -- redirect stdout buffer to its original buffer
  ~RedirectCout() {
    std::cout.rdbuf(coutBuffer_);
  }
  ~RedirectCout();

private:
  std::stringstream ssBuffer_;
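For context, a hedged sketch of how RedirectCout is typically used to capture printed output for the wrappers (the choice of gtsam::print here is just an example):

#include <gtsam/base/utilities.h>
#include <gtsam/base/Vector.h>

std::string capturedPrint() {
  gtsam::RedirectCout redirect;                        // swap std::cout's buffer
  gtsam::print(gtsam::Vector3(1.0, 2.0, 3.0), "v: ");  // anything that prints to cout
  return redirect.str();                               // grab the text; the destructor
}                                                      // restores the original buffer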
@ -92,7 +92,7 @@ Matrix kroneckerProductIdentity(const Weights& w) {
|
|||
|
||||
/// CRTP Base class for function bases
|
||||
template <typename DERIVED>
|
||||
class GTSAM_EXPORT Basis {
|
||||
class Basis {
|
||||
public:
|
||||
/**
|
||||
* Calculate weights for all x in vector X.
|
||||
|
@ -497,11 +497,6 @@ class GTSAM_EXPORT Basis {
|
|||
}
|
||||
};
|
||||
|
||||
// Vector version for MATLAB :-(
|
||||
static double Derivative(double x, const Vector& p, //
|
||||
OptionalJacobian</*1xN*/ -1, -1> H = boost::none) {
|
||||
return DerivativeFunctor(x)(p.transpose(), H);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -29,9 +29,12 @@ namespace gtsam {
|
|||
* pseudo-spectral parameterization.
|
||||
*
|
||||
* @tparam BASIS The basis class to use e.g. Chebyshev2
|
||||
*
|
||||
* Example, degree 8 Chebyshev polynomial measured at x=0.5:
|
||||
* EvaluationFactor<Chebyshev2> factor(key, measured, model, 8, 0.5);
|
||||
*/
|
||||
template <class BASIS>
|
||||
class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
||||
class EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, Vector>;
|
||||
|
||||
|
@ -47,7 +50,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
|||
* @param N The degree of the polynomial.
|
||||
* @param x The point at which to evaluate the polynomial.
|
||||
*/
|
||||
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
EvaluationFactor(Key key, double z, const SharedNoiseModel &model,
|
||||
const size_t N, double x)
|
||||
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x)) {}
|
||||
|
||||
|
@ -62,7 +65,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
|||
* @param a Lower bound for the polynomial.
|
||||
* @param b Upper bound for the polynomial.
|
||||
*/
|
||||
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
|
||||
EvaluationFactor(Key key, double z, const SharedNoiseModel &model,
|
||||
const size_t N, double x, double a, double b)
|
||||
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x, a, b)) {}
|
||||
|
||||
|
@ -85,7 +88,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
|
|||
* @param M: Size of the evaluated state vector.
|
||||
*/
|
||||
template <class BASIS, int M>
|
||||
class GTSAM_EXPORT VectorEvaluationFactor
|
||||
class VectorEvaluationFactor
|
||||
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
|
||||
|
@ -148,7 +151,7 @@ class GTSAM_EXPORT VectorEvaluationFactor
|
|||
* where N is the degree and i is the component index.
|
||||
*/
|
||||
template <class BASIS, size_t P>
|
||||
class GTSAM_EXPORT VectorComponentFactor
|
||||
class VectorComponentFactor
|
||||
: public FunctorizedFactor<double, ParameterMatrix<P>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
|
||||
|
@ -217,7 +220,7 @@ class GTSAM_EXPORT VectorComponentFactor
|
|||
* where `x` is the value (e.g. timestep) at which the rotation was evaluated.
|
||||
*/
|
||||
template <class BASIS, typename T>
|
||||
class GTSAM_EXPORT ManifoldEvaluationFactor
|
||||
class ManifoldEvaluationFactor
|
||||
: public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
|
||||
|
@ -269,7 +272,7 @@ class GTSAM_EXPORT ManifoldEvaluationFactor
|
|||
* @param BASIS: The basis class to use e.g. Chebyshev2
|
||||
*/
|
||||
template <class BASIS>
|
||||
class GTSAM_EXPORT DerivativeFactor
|
||||
class DerivativeFactor
|
||||
: public FunctorizedFactor<double, typename BASIS::Parameters> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, typename BASIS::Parameters>;
|
||||
|
@ -318,7 +321,7 @@ class GTSAM_EXPORT DerivativeFactor
|
|||
* @param M: Size of the evaluated state vector derivative.
|
||||
*/
|
||||
template <class BASIS, int M>
|
||||
class GTSAM_EXPORT VectorDerivativeFactor
|
||||
class VectorDerivativeFactor
|
||||
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
|
||||
|
@ -371,7 +374,7 @@ class GTSAM_EXPORT VectorDerivativeFactor
|
|||
* @param P: Size of the control component derivative.
|
||||
*/
|
||||
template <class BASIS, int P>
|
||||
class GTSAM_EXPORT ComponentDerivativeFactor
|
||||
class ComponentDerivativeFactor
|
||||
: public FunctorizedFactor<double, ParameterMatrix<P>> {
|
||||
private:
|
||||
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
|
||||
|
|
|
@ -21,8 +21,6 @@
|
|||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/basis/Basis.h>
|
||||
|
||||
#include <unsupported/Eigen/KroneckerProduct>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
|
@ -31,7 +29,7 @@ namespace gtsam {
|
|||
* These are typically denoted with the symbol T_n, where n is the degree.
|
||||
* The parameter N is the number of coefficients, i.e., N = n+1.
|
||||
*/
|
||||
struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
|
||||
struct GTSAM_EXPORT Chebyshev1Basis : Basis<Chebyshev1Basis> {
|
||||
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
|
||||
|
||||
Parameters parameters_;
|
||||
|
@ -79,7 +77,7 @@ struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
|
|||
* functions. In this sense, they are like the sines and cosines of the Fourier
|
||||
* basis.
|
||||
*/
|
||||
struct Chebyshev2Basis : Basis<Chebyshev2Basis> {
|
||||
struct GTSAM_EXPORT Chebyshev2Basis : Basis<Chebyshev2Basis> {
|
||||
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;
|
||||
|
||||
/**
|
||||
|
|
|
@ -22,8 +22,7 @@
|
|||
*
|
||||
* This is different from Chebyshev.h since it leverage ideas from
|
||||
* pseudo-spectral optimization, i.e. we don't decompose into basis functions,
|
||||
* rather estimate function parameters that enforce function nodes at Chebyshev
|
||||
* points.
|
||||
* rather estimate function values at the Chebyshev points.
|
||||
*
|
||||
* Please refer to Agrawal21icra for more details.
|
||||
*
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
/// Fourier basis
|
||||
class GTSAM_EXPORT FourierBasis : public Basis<FourierBasis> {
|
||||
class FourierBasis : public Basis<FourierBasis> {
|
||||
public:
|
||||
using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
|
||||
using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;
|
||||
|
|
|
@ -153,7 +153,7 @@ class ParameterMatrix {
|
|||
return matrix_ * other;
|
||||
}
|
||||
|
||||
/// @name Vector Space requirements, following LieMatrix
|
||||
/// @name Vector Space requirements
|
||||
/// @{
|
||||
|
||||
/**
|
||||
|
|
|
@ -44,9 +44,6 @@ class Chebyshev2 {
|
|||
static Matrix DerivativeWeights(size_t N, double x, double a, double b);
|
||||
static Matrix IntegrationWeights(size_t N, double a, double b);
|
||||
static Matrix DifferentiationMatrix(size_t N, double a, double b);
|
||||
|
||||
// TODO Needs OptionalJacobian
|
||||
// static double Derivative(double x, Vector f);
|
||||
};
|
||||
|
||||
#include <gtsam/basis/ParameterMatrix.h>
|
||||
|
@ -140,7 +137,7 @@ class FitBasis {
|
|||
static gtsam::GaussianFactorGraph::shared_ptr LinearGraph(
|
||||
const std::map<double, double>& sequence,
|
||||
const gtsam::noiseModel::Base* model, size_t N);
|
||||
Parameters parameters() const;
|
||||
This::Parameters parameters() const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -0,0 +1,230 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------1-------------------------------------------
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file testBasisFactors.cpp
|
||||
* @date May 31, 2020
|
||||
* @author Varun Agrawal
|
||||
* @brief unit tests for factors in BasisFactors.h
|
||||
*/
|
||||
|
||||
#include <gtsam/basis/Basis.h>
|
||||
#include <gtsam/basis/BasisFactors.h>
|
||||
#include <gtsam/basis/Chebyshev2.h>
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/nonlinear/FunctorizedFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/factorTesting.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
using gtsam::noiseModel::Isotropic;
|
||||
using gtsam::Pose2;
|
||||
using gtsam::Vector;
|
||||
using gtsam::Values;
|
||||
using gtsam::Chebyshev2;
|
||||
using gtsam::ParameterMatrix;
|
||||
using gtsam::LevenbergMarquardtParams;
|
||||
using gtsam::LevenbergMarquardtOptimizer;
|
||||
using gtsam::NonlinearFactorGraph;
|
||||
using gtsam::NonlinearOptimizerParams;
|
||||
|
||||
constexpr size_t N = 2;
|
||||
|
||||
// Key used in all tests
|
||||
const gtsam::Symbol key('X', 0);
|
||||
|
||||
//******************************************************************************
|
||||
TEST(BasisFactors, EvaluationFactor) {
|
||||
using gtsam::EvaluationFactor;
|
||||
|
||||
double measured = 0;
|
||||
|
||||
auto model = Isotropic::Sigma(1, 1.0);
|
||||
EvaluationFactor<Chebyshev2> factor(key, measured, model, N, 0);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(factor);
|
||||
|
||||
Vector functionValues(N);
|
||||
functionValues.setZero();
|
||||
|
||||
Values initial;
|
||||
initial.insert<Vector>(key, functionValues);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(BasisFactors, VectorEvaluationFactor) {
|
||||
using gtsam::VectorEvaluationFactor;
|
||||
const size_t M = 4;
|
||||
|
||||
const Vector measured = Vector::Zero(M);
|
||||
|
||||
auto model = Isotropic::Sigma(M, 1.0);
|
||||
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.add(factor);
|
||||
|
||||
ParameterMatrix<M> stateMatrix(N);
|
||||
|
||||
Values initial;
|
||||
initial.insert<ParameterMatrix<M>>(key, stateMatrix);
|
||||
|
||||
LevenbergMarquardtParams parameters;
|
||||
parameters.setMaxIterations(20);
|
||||
Values result =
|
||||
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();
|
||||
|
||||
EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(BasisFactors, Print) {
|
||||
using gtsam::VectorEvaluationFactor;
|
||||
const size_t M = 1;
|
||||
|
||||
const Vector measured = Vector::Ones(M) * 42;
|
||||
|
||||
auto model = Isotropic::Sigma(M, 1.0);
|
||||
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);
|
||||
|
||||
std::string expected =
|
||||
" keys = { X0 }\n"
|
||||
" noise model: unit (1) \n"
|
||||
"FunctorizedFactor(X0)\n"
|
||||
" measurement: [\n"
|
||||
" 42\n"
|
||||
"]\n"
|
||||
" noise model sigmas: 1\n";
|
||||
|
||||
EXPECT(assert_print_equal(expected, factor));
|
||||
}
|
||||
|
||||
//******************************************************************************
TEST(BasisFactors, VectorComponentFactor) {
  using gtsam::VectorComponentFactor;
  const int P = 4;
  const size_t i = 2;
  const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
  auto model = Isotropic::Sigma(1, 1.0);
  VectorComponentFactor<Chebyshev2, P> factor(key, measured, model, N, i,
                                              t, a, b);

  NonlinearFactorGraph graph;
  graph.add(factor);

  ParameterMatrix<P> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<P>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, ManifoldEvaluationFactor) {
  using gtsam::ManifoldEvaluationFactor;
  const Pose2 measured;
  const double t = 3.0, a = 2.0, b = 4.0;
  auto model = Isotropic::Sigma(3, 1.0);
  ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N,
                                                     t, a, b);

  NonlinearFactorGraph graph;
  graph.add(factor);

  ParameterMatrix<3> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<3>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}
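The ManifoldEvaluationFactor ties the pose predicted by the basis at time t to the measured Pose2. Once optimized, the coefficient matrix can be evaluated again with the ManifoldEvaluationFunctor exercised later in this change; a hedged sketch (result.at and the functor call are assumptions borrowed from elsewhere in this diff):

  // Sketch: recover the interpolated pose at t from the optimized coefficients.
  ParameterMatrix<3> optimized = result.at<ParameterMatrix<3>>(key);
  Chebyshev2::ManifoldEvaluationFunctor<Pose2> fx(N, t, a, b);
  Pose2 interpolated = fx(optimized);  // should agree with `measured`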
//******************************************************************************
TEST(BasisFactors, VecDerivativePrior) {
  using gtsam::VectorDerivativeFactor;
  const size_t M = 4;

  const Vector measured = Vector::Zero(M);
  auto model = Isotropic::Sigma(M, 1.0);
  VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);

  NonlinearFactorGraph graph;
  graph.add(vecDPrior);

  ParameterMatrix<M> stateMatrix(N);

  Values initial;
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, ComponentDerivativeFactor) {
  using gtsam::ComponentDerivativeFactor;
  const size_t M = 4;

  double measured = 0;
  auto model = Isotropic::Sigma(1, 1.0);
  ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
                                                         N, 0, 0);

  NonlinearFactorGraph graph;
  graph.add(controlDPrior);

  Values initial;
  ParameterMatrix<M> stateMatrix(N);
  initial.insert<ParameterMatrix<M>>(key, stateMatrix);

  LevenbergMarquardtParams parameters;
  parameters.setMaxIterations(20);
  Values result =
      LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@ -25,9 +25,10 @@
using namespace std;
using namespace gtsam;

namespace {
auto model = noiseModel::Unit::Create(1);

const size_t N = 3;
}  // namespace

//******************************************************************************
TEST(Chebyshev, Chebyshev1) {
@ -10,26 +10,30 @@
 * -------------------------------------------------------------------------- */

/**
 * @file testChebyshev.cpp
 * @file testChebyshev2.cpp
 * @date July 4, 2020
 * @author Varun Agrawal
 * @brief Unit tests for Chebyshev Basis Decompositions via pseudo-spectral
 *        methods
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/base/Testable.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;
using namespace boost::placeholders;

namespace {
noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);

const size_t N = 32;
}  // namespace

//******************************************************************************
TEST(Chebyshev2, Point) {
@ -121,12 +125,30 @@ TEST(Chebyshev2, InterpolateVector) {
  EXPECT(assert_equal(numericalH, actualH, 1e-9));
}

//******************************************************************************
// Interpolating poses using the exponential map
TEST(Chebyshev2, InterpolatePose2) {
  double t = 30, a = 0, b = 100;

  ParameterMatrix<3> X(N);
  X.row(0) = Chebyshev2::Points(N, a, b);  // slope 1 ramp
  X.row(1) = Vector::Zero(N);
  X.row(2) = 0.1 * Vector::Ones(N);

  Vector xi(3);
  xi << t, 0, 0.1;
  Chebyshev2::ManifoldEvaluationFunctor<Pose2> fx(N, t, a, b);
  // We use xi as canonical coordinates via exponential map
  Pose2 expected = Pose2::ChartAtOrigin::Retract(xi);
  EXPECT(assert_equal(expected, fx(X)));
}
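The expected pose follows because each row of X is evaluated at t: the slope-1 ramp row gives 30, the zero row gives 0, and the constant row gives 0.1, so the interpolated tangent vector equals xi before retraction. The evaluation nodes themselves come from Chebyshev2; a small sketch of the two accessors used in these tests (return types assumed from their use above):

  // Sketch: the N interpolation nodes on [a, b] used throughout these tests.
  Vector nodes = Chebyshev2::Points(N, a, b);  // all N points at once
  double p3 = Chebyshev2::Point(N, 3, a, b);   // a single node, index 3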
//******************************************************************************
TEST(Chebyshev2, Decomposition) {
  // Create example sequence
  Sequence sequence;
  for (size_t i = 0; i < 16; i++) {
    double x = (double)i / 16. - 0.99, y = x;
    double x = (1.0/ 16)*i - 0.99, y = x;
    sequence[x] = y;
  }
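The remainder of this test falls outside the hunk, but FitBasis.h is included above; fitting N Chebyshev2 coefficients to such a sequence would look roughly like the sketch below (the FitBasis constructor and accessor names are assumptions, not taken from this diff):

  // Hedged sketch: least-squares fit of N basis coefficients to (x, y) samples.
  FitBasis<Chebyshev2> fit(sequence, model, N);  // assumed signature
  auto parameters = fit.parameters();            // assumed accessor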
@ -144,11 +166,11 @@ TEST(Chebyshev2, DifferentiationMatrix3) {
  // Trefethen00book, p.55
  const size_t N = 3;
  Matrix expected(N, N);
  // Differentiation matrix computed from Chebfun
  // Differentiation matrix computed from chebfun
  expected << 1.5000, -2.0000, 0.5000,  //
      0.5000, -0.0000, -0.5000,         //
      -0.5000, 2.0000, -1.5000;
  // multiply by -1 since the cheb points have a phase shift wrt Trefethen
  // multiply by -1 since the chebyshev points have a phase shift wrt Trefethen
  // This was verified with chebfun
  expected = -expected;

@ -167,7 +189,7 @@ TEST(Chebyshev2, DerivativeMatrix6) {
      0.3820, -0.8944, 1.6180, 0.1708, -2.0000, 0.7236,   //
      -0.2764, 0.6180, -0.8944, 2.0000, 1.1708, -2.6180,  //
      0.5000, -1.1056, 1.5279, -2.8944, 10.4721, -8.5000;
  // multiply by -1 since the cheb points have a phase shift wrt Trefethen
  // multiply by -1 since the chebyshev points have a phase shift wrt Trefethen
  // This was verified with chebfun
  expected = -expected;

@ -252,7 +274,7 @@ TEST(Chebyshev2, DerivativeWeights2) {
  Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-8);

  // test if derivative calculation and cheb point is correct
  // test if derivative calculation and Chebyshev point is correct
  double x3 = Chebyshev2::Point(N, 3, a, b);
  Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-8);
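DerivativeWeights returns a row of weights that, dotted with the function samples at the N Chebyshev nodes, approximates the derivative at an arbitrary query point. A short sketch reusing the test's own names (f, fprime, fvals, a, b, N are assumed to be those defined earlier in the test):

  // Sketch: approximate f'(x) at any x in [a, b] from samples at the N nodes.
  double x = 0.5 * (a + b);  // illustrative query point
  Weights dW = Chebyshev2::DerivativeWeights(N, x, a, b);
  EXPECT_DOUBLES_EQUAL(fprime(x), dW * fvals, 1e-8);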
@ -70,7 +70,7 @@
#cmakedefine GTSAM_THROW_CHEIRALITY_EXCEPTION

// Make sure dependent projects that want it can see deprecated functions
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41
#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V42

// Support Metis-based nested dissection
#cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION

@ -80,3 +80,6 @@

// Whether to use the system installed Metis instead of the provided one
#cmakedefine GTSAM_USE_SYSTEM_METIS

// Toggle switch for BetweenFactor jacobian computation
#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
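Since these are #cmakedefine entries, CMake turns each enabled option into a preprocessor macro in the generated config header, so downstream code can guard on the renamed flag. A minimal sketch (the include path assumes the installed gtsam/config.h):

  #include <gtsam/config.h>

  #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
    // APIs deprecated in GTSAM 4.2 are still visible here.
  #else
    // Only the replacement APIs are available.
  #endif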
@ -10,17 +10,19 @@
 * -------------------------------------------------------------------------- */

/**
 * @file LieScalar.h
 * @brief External deprecation warning, see deprecated/LieScalar.h for details
 * @author Kai Ni
 * @file AlgebraicDecisionTree.cpp
 * @date Feb 20, 2022
 * @author Mike Sheffler
 * @author Duy-Nguyen Ta
 * @author Frank Dellaert
 */

#pragma once
#include "AlgebraicDecisionTree.h"

#ifdef _MSC_VER
#pragma message("LieScalar.h is deprecated. Please use double/float instead.")
#else
#warning "LieScalar.h is deprecated. Please use double/float instead."
#endif
#include <gtsam/base/types.h>

#include <gtsam/base/deprecated/LieScalar.h>
namespace gtsam {

template class AlgebraicDecisionTree<Key>;

}  // namespace gtsam
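The new translation unit explicitly instantiates AlgebraicDecisionTree<Key>, presumably so that the most common label type is compiled once into the library; other label types still instantiate from the header as before. A tiny illustrative sketch (label type and values are arbitrary):

  // Header-only instantiation for any other label type still works, e.g.:
  gtsam::AlgebraicDecisionTree<std::string> tree("x", 1.0, 2.0);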
@ -18,8 +18,13 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/discrete/DecisionTree-inl.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
|
@ -27,21 +32,28 @@ namespace gtsam {
|
|||
* Just has some nice constructors and some syntactic sugar
|
||||
* TODO: consider eliminating this class altogether?
|
||||
*/
|
||||
template<typename L>
|
||||
class AlgebraicDecisionTree: public DecisionTree<L, double> {
|
||||
template <typename L>
|
||||
class GTSAM_EXPORT AlgebraicDecisionTree : public DecisionTree<L, double> {
|
||||
/**
|
||||
* @brief Default method used by `labelFormatter` or `valueFormatter` when
|
||||
* printing.
|
||||
*
|
||||
* @param x The value passed to format.
|
||||
* @return std::string
|
||||
*/
|
||||
static std::string DefaultFormatter(const L& x) {
|
||||
std::stringstream ss;
|
||||
ss << x;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
typedef DecisionTree<L, double> Super;
|
||||
using Base = DecisionTree<L, double>;
|
||||
|
||||
/** The Real ring with addition and multiplication */
|
||||
struct Ring {
|
||||
static inline double zero() {
|
||||
return 0.0;
|
||||
}
|
||||
static inline double one() {
|
||||
return 1.0;
|
||||
}
|
||||
static inline double zero() { return 0.0; }
|
||||
static inline double one() { return 1.0; }
|
||||
static inline double add(const double& a, const double& b) {
|
||||
return a + b;
|
||||
}
|
||||
|
@ -54,39 +66,35 @@ namespace gtsam {
|
|||
static inline double div(const double& a, const double& b) {
|
||||
return a / b;
|
||||
}
|
||||
static inline double id(const double& x) {
|
||||
return x;
|
||||
}
|
||||
static inline double id(const double& x) { return x; }
|
||||
};
|
||||
|
||||
AlgebraicDecisionTree() :
|
||||
Super(1.0) {
|
||||
}
|
||||
AlgebraicDecisionTree() : Base(1.0) {}
|
||||
|
||||
AlgebraicDecisionTree(const Super& add) :
|
||||
Super(add) {
|
||||
}
|
||||
// Explicitly non-explicit constructor
|
||||
AlgebraicDecisionTree(const Base& add) : Base(add) {}
|
||||
|
||||
/** Create a new leaf function splitting on a variable */
|
||||
AlgebraicDecisionTree(const L& label, double y1, double y2) :
|
||||
Super(label, y1, y2) {
|
||||
}
|
||||
AlgebraicDecisionTree(const L& label, double y1, double y2)
|
||||
: Base(label, y1, y2) {}
|
||||
|
||||
/** Create a new leaf function splitting on a variable */
|
||||
AlgebraicDecisionTree(const typename Super::LabelC& labelC, double y1, double y2) :
|
||||
Super(labelC, y1, y2) {
|
||||
}
|
||||
AlgebraicDecisionTree(const typename Base::LabelC& labelC, double y1,
|
||||
double y2)
|
||||
: Base(labelC, y1, y2) {}
|
||||
|
||||
/** Create from keys and vector table */
|
||||
AlgebraicDecisionTree //
|
||||
(const std::vector<typename Super::LabelC>& labelCs, const std::vector<double>& ys) {
|
||||
this->root_ = Super::create(labelCs.begin(), labelCs.end(), ys.begin(),
|
||||
ys.end());
|
||||
(const std::vector<typename Base::LabelC>& labelCs,
|
||||
const std::vector<double>& ys) {
|
||||
this->root_ =
|
||||
Base::create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end());
|
||||
}
|
||||
|
||||
/** Create from keys and string table */
|
||||
AlgebraicDecisionTree //
|
||||
(const std::vector<typename Super::LabelC>& labelCs, const std::string& table) {
|
||||
(const std::vector<typename Base::LabelC>& labelCs,
|
||||
const std::string& table) {
|
||||
// Convert string to doubles
|
||||
std::vector<double> ys;
|
||||
std::istringstream iss(table);
|
||||
|
@ -94,23 +102,32 @@ namespace gtsam {
|
|||
std::istream_iterator<double>(), std::back_inserter(ys));
|
||||
|
||||
// now call recursive Create
|
||||
this->root_ = Super::create(labelCs.begin(), labelCs.end(), ys.begin(),
|
||||
ys.end());
|
||||
this->root_ =
|
||||
Base::create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end());
|
||||
}
|
||||
|
||||
/** Create a new function splitting on a variable */
|
||||
template<typename Iterator>
|
||||
AlgebraicDecisionTree(Iterator begin, Iterator end, const L& label) :
|
||||
Super(nullptr) {
|
||||
template <typename Iterator>
|
||||
AlgebraicDecisionTree(Iterator begin, Iterator end, const L& label)
|
||||
: Base(nullptr) {
|
||||
this->root_ = compose(begin, end, label);
|
||||
}
|
||||
|
||||
/** Convert */
|
||||
template<typename M>
|
||||
/**
|
||||
* Convert labels from type M to type L.
|
||||
*
|
||||
* @param other: The AlgebraicDecisionTree with label type M to convert.
|
||||
* @param map: Map from label type M to label type L.
|
||||
*/
|
||||
template <typename M>
|
||||
AlgebraicDecisionTree(const AlgebraicDecisionTree<M>& other,
|
||||
const std::map<M, L>& map) {
|
||||
this->root_ = this->template convert<M, double>(other.root_, map,
|
||||
Ring::id);
|
||||
// Functor for label conversion so we can use `convertFrom`.
|
||||
std::function<L(const M&)> L_of_M = [&map](const M& label) -> L {
|
||||
return map.at(label);
|
||||
};
|
||||
std::function<double(const double&)> op = Ring::id;
|
||||
this->root_ = DecisionTree<L, double>::convertFrom(other.root_, L_of_M, op);
|
||||
}
|
||||
|
||||
/** sum */
|
||||
|
@ -134,12 +151,31 @@
  }

  /** sum out variable */
  AlgebraicDecisionTree sum(const typename Super::LabelC& labelC) const {
  AlgebraicDecisionTree sum(const typename Base::LabelC& labelC) const {
    return this->combine(labelC, &Ring::add);
  }

  /// print method customized to value type `double`.
  void print(const std::string& s,
             const typename Base::LabelFormatter& labelFormatter =
                 &DefaultFormatter) const {
    auto valueFormatter = [](const double& v) {
      return (boost::format("%4.8g") % v).str();
    };
    // AlgebraicDecisionTree
    Base::print(s, labelFormatter, valueFormatter);
  }

}
// namespace gtsam
  /// Equality method customized to value type `double`.
  bool equals(const AlgebraicDecisionTree& other, double tol = 1e-9) const {
    // lambda for comparison of two doubles upto some tolerance.
    auto compare = [tol](double a, double b) {
      return std::abs(a - b) < tol;
    };
    return Base::equals(other, compare);
  }
};

template <typename T>
struct traits<AlgebraicDecisionTree<T>>
    : public Testable<AlgebraicDecisionTree<T>> {};
}  // namespace gtsam
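A short usage sketch for the class after this change, based only on the constructors and the new print/equals overloads shown above (labels and table values are illustrative):

  #include <gtsam/discrete/AlgebraicDecisionTree.h>

  using ADT = gtsam::AlgebraicDecisionTree<std::string>;
  const std::vector<std::pair<std::string, size_t>> labelCs{{"B", 2}, {"A", 2}};
  ADT f(labelCs, "0.1 0.9 0.2 0.8");  // from keys and a string table
  f.print("f");                       // labels via DefaultFormatter, values via %4.8g
  ADT marginal = f.sum({"A", 2});     // sum out a variable
  bool same = f.equals(f, 1e-9);      // tolerance-based comparison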
@ -19,23 +19,25 @@
|
|||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
/**
|
||||
* An assignment from labels to value index (size_t).
|
||||
* Assigns to each label a value. Implemented as a simple map.
|
||||
* A discrete factor takes an Assignment and returns a value.
|
||||
*/
|
||||
template<class L>
|
||||
class Assignment: public std::map<L, size_t> {
|
||||
template <class L>
|
||||
class Assignment : public std::map<L, size_t> {
|
||||
public:
|
||||
using std::map<L, size_t>::operator=;
|
||||
|
||||
void print(const std::string& s = "Assignment: ") const {
|
||||
std::cout << s << ": ";
|
||||
for(const typename Assignment::value_type& keyValue: *this)
|
||||
for (const typename Assignment::value_type& keyValue : *this)
|
||||
std::cout << "(" << keyValue.first << ", " << keyValue.second << ")";
|
||||
std::cout << std::endl;
|
||||
}
|
||||
|
@ -43,8 +45,6 @@ namespace gtsam {
|
|||
bool equals(const Assignment& other, double tol = 1e-9) const {
|
||||
return (*this == other);
|
||||
}
|
||||
}; //Assignment
|
||||
|
||||
|
||||
/**
|
||||
* @brief Get Cartesian product consisting all possible configurations
|
||||
|
@ -58,29 +58,28 @@ namespace gtsam {
 * variables with each having cardinalities 4, we get 4096 possible
 * configurations!!
 */
template<typename L>
std::vector<Assignment<L> > cartesianProduct(
    const std::vector<std::pair<L, size_t> >& keys) {
  std::vector<Assignment<L> > allPossValues;
  Assignment<L> values;
  template <typename Derived = Assignment<L>>
  static std::vector<Derived> CartesianProduct(
      const std::vector<std::pair<L, size_t>>& keys) {
    std::vector<Derived> allPossValues;
    Derived values;
    typedef std::pair<L, size_t> DiscreteKey;
    for(const DiscreteKey& key: keys)
      values[key.first] = 0; //Initialize from 0
    for (const DiscreteKey& key : keys)
      values[key.first] = 0;  // Initialize from 0
    while (1) {
      allPossValues.push_back(values);
      size_t j = 0;
      for (j = 0; j < keys.size(); j++) {
        L idx = keys[j].first;
        values[idx]++;
        if (values[idx] < keys[j].second)
          break;
        //Wrap condition
        if (values[idx] < keys[j].second) break;
        // Wrap condition
        values[idx] = 0;
      }
      if (j == keys.size())
        break;
      if (j == keys.size()) break;
    }
    return allPossValues;
  }
};  // Assignment

}  // namespace gtsam
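CartesianProduct is now a static member of Assignment rather than a free function; a short usage sketch (label type and cardinalities are illustrative):

  // Enumerate all 2 * 3 = 6 assignments over two discrete variables.
  std::vector<std::pair<std::string, size_t>> keys{{"A", 2}, {"B", 3}};
  auto configs = gtsam::Assignment<std::string>::CartesianProduct(keys);
  for (const auto& config : configs) config.print();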
@ -20,79 +20,93 @@
|
|||
#pragma once
|
||||
|
||||
#include <gtsam/discrete/DecisionTree.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <boost/noncopyable.hpp>
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/tuple/tuple.hpp>
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
using boost::assign::operator+=;
|
||||
#include <boost/type_traits/has_dereference.hpp>
|
||||
#include <boost/unordered_set.hpp>
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
#include <list>
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
using boost::assign::operator+=;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// Node
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
#ifdef DT_DEBUG_MEMORY
|
||||
template<typename L, typename Y>
|
||||
int DecisionTree<L, Y>::Node::nrNodes = 0;
|
||||
#endif
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// Leaf
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
class DecisionTree<L, Y>::Leaf: public DecisionTree<L, Y>::Node {
|
||||
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
struct DecisionTree<L, Y>::Leaf : public DecisionTree<L, Y>::Node {
|
||||
/** constant stored in this leaf */
|
||||
Y constant_;
|
||||
|
||||
public:
|
||||
/** The number of assignments contained within this leaf.
|
||||
* Particularly useful when leaves have been pruned.
|
||||
*/
|
||||
size_t nrAssignments_;
|
||||
|
||||
/** Constructor from constant */
|
||||
Leaf(const Y& constant) :
|
||||
constant_(constant) {}
|
||||
/// Constructor from constant
|
||||
Leaf(const Y& constant, size_t nrAssignments = 1)
|
||||
: constant_(constant), nrAssignments_(nrAssignments) {}
|
||||
|
||||
/** return the constant */
|
||||
/// Return the constant
|
||||
const Y& constant() const {
|
||||
return constant_;
|
||||
}
|
||||
|
||||
/// Return the number of assignments contained within this leaf.
|
||||
size_t nrAssignments() const { return nrAssignments_; }
|
||||
|
||||
/// Leaf-Leaf equality
|
||||
bool sameLeaf(const Leaf& q) const override {
|
||||
return constant_ == q.constant_;
|
||||
}
|
||||
|
||||
/// polymorphic equality: is q is a leaf, could be
|
||||
/// polymorphic equality: is q a leaf and is it the same as this leaf?
|
||||
bool sameLeaf(const Node& q) const override {
|
||||
return (q.isLeaf() && q.sameLeaf(*this));
|
||||
}
|
||||
|
||||
/** equality up to tolerance */
|
||||
bool equals(const Node& q, double tol) const override {
|
||||
const Leaf* other = dynamic_cast<const Leaf*> (&q);
|
||||
/// equality up to tolerance
|
||||
bool equals(const Node& q, const CompareFunc& compare) const override {
|
||||
const Leaf* other = dynamic_cast<const Leaf*>(&q);
|
||||
if (!other) return false;
|
||||
return std::abs(double(this->constant_ - other->constant_)) < tol;
|
||||
return compare(this->constant_, other->constant_);
|
||||
}
|
||||
|
||||
/** print */
|
||||
void print(const std::string& s) const override {
|
||||
bool showZero = true;
|
||||
if (showZero || constant_) std::cout << s << " Leaf " << constant_ << std::endl;
|
||||
/// print
|
||||
void print(const std::string& s, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter) const override {
|
||||
std::cout << s << " Leaf " << valueFormatter(constant_) << std::endl;
|
||||
}
|
||||
|
||||
/** to graphviz file */
|
||||
void dot(std::ostream& os, bool showZero) const override {
|
||||
if (showZero || constant_) os << "\"" << this->id() << "\" [label=\""
|
||||
<< boost::format("%4.2g") % constant_
|
||||
<< "\", shape=box, rank=sink, height=0.35, fixedsize=true]\n"; // width=0.55,
|
||||
/** Write graphviz format to stream `os`. */
|
||||
void dot(std::ostream& os, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const override {
|
||||
std::string value = valueFormatter(constant_);
|
||||
if (showZero || value.compare("0"))
|
||||
os << "\"" << this->id() << "\" [label=\"" << value
|
||||
<< "\", shape=box, rank=sink, height=0.35, fixedsize=true]\n";
|
||||
}
|
||||
|
||||
/** evaluate */
|
||||
|
@ -102,7 +116,14 @@ namespace gtsam {
|
|||
|
||||
/** apply unary operator */
|
||||
NodePtr apply(const Unary& op) const override {
|
||||
NodePtr f(new Leaf(op(constant_)));
|
||||
NodePtr f(new Leaf(op(constant_), nrAssignments_));
|
||||
return f;
|
||||
}
|
||||
|
||||
/// Apply unary operator with assignment
|
||||
NodePtr apply(const UnaryAssignment& op,
|
||||
const Assignment<L>& assignment) const override {
|
||||
NodePtr f(new Leaf(op(assignment, constant_), nrAssignments_));
|
||||
return f;
|
||||
}
|
||||
|
||||
|
@ -117,7 +138,8 @@ namespace gtsam {
|
|||
|
||||
// Applying binary operator to two leaves results in a leaf
|
||||
NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
|
||||
NodePtr h(new Leaf(op(fL.constant_, constant_))); // fL op gL
|
||||
// fL op gL
|
||||
NodePtr h(new Leaf(op(fL.constant_, constant_), nrAssignments_));
|
||||
return h;
|
||||
}
|
||||
|
||||
|
@ -128,19 +150,17 @@ namespace gtsam {
|
|||
|
||||
/** choose a branch, create new memory ! */
|
||||
NodePtr choose(const L& label, size_t index) const override {
|
||||
return NodePtr(new Leaf(constant()));
|
||||
return NodePtr(new Leaf(constant(), nrAssignments()));
|
||||
}
|
||||
|
||||
bool isLeaf() const override { return true; }
|
||||
|
||||
}; // Leaf
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// Choice
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
class DecisionTree<L, Y>::Choice: public DecisionTree<L, Y>::Node {
|
||||
|
||||
struct DecisionTree<L, Y>::Choice: public DecisionTree<L, Y>::Node {
|
||||
/** the label of the variable on which we split */
|
||||
L label_;
|
||||
|
||||
|
@ -148,27 +168,38 @@ namespace gtsam {
|
|||
std::vector<NodePtr> branches_;
|
||||
|
||||
private:
|
||||
/** incremental allSame */
|
||||
/**
|
||||
* Incremental allSame.
|
||||
* Records if all the branches are the same leaf.
|
||||
*/
|
||||
size_t allSame_;
|
||||
|
||||
typedef boost::shared_ptr<const Choice> ChoicePtr;
|
||||
using ChoicePtr = boost::shared_ptr<const Choice>;
|
||||
|
||||
public:
|
||||
|
||||
~Choice() override {
|
||||
#ifdef DT_DEBUG_MEMORY
|
||||
std::std::cout << Node::nrNodes << " destructing (Choice) " << this->id() << std::std::endl;
|
||||
std::std::cout << Node::nrNodes << " destructing (Choice) " << this->id()
|
||||
<< std::std::endl;
|
||||
#endif
|
||||
}
|
||||
|
||||
/** If all branches of a choice node f are the same, just return a branch */
|
||||
/// If all branches of a choice node f are the same, just return a branch.
|
||||
static NodePtr Unique(const ChoicePtr& f) {
|
||||
#ifndef DT_NO_PRUNING
|
||||
#ifndef GTSAM_DT_NO_PRUNING
|
||||
if (f->allSame_) {
|
||||
assert(f->branches().size() > 0);
|
||||
NodePtr f0 = f->branches_[0];
|
||||
assert(f0->isLeaf());
|
||||
NodePtr newLeaf(new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant()));
|
||||
|
||||
size_t nrAssignments = 0;
|
||||
for(auto branch: f->branches()) {
|
||||
assert(branch->isLeaf());
|
||||
nrAssignments +=
|
||||
boost::dynamic_pointer_cast<const Leaf>(branch)->nrAssignments();
|
||||
}
|
||||
NodePtr newLeaf(
|
||||
new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant(),
|
||||
nrAssignments));
|
||||
return newLeaf;
|
||||
} else
|
||||
#endif
|
||||
|
@ -177,18 +208,15 @@ namespace gtsam {
|
|||
|
||||
bool isLeaf() const override { return false; }
|
||||
|
||||
/** Constructor, given choice label and mandatory expected branch count */
|
||||
/// Constructor, given choice label and mandatory expected branch count.
|
||||
Choice(const L& label, size_t count) :
|
||||
label_(label), allSame_(true) {
|
||||
branches_.reserve(count);
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct from applying binary op to two Choice nodes
|
||||
*/
|
||||
/// Construct from applying binary op to two Choice nodes.
|
||||
Choice(const Choice& f, const Choice& g, const Binary& op) :
|
||||
allSame_(true) {
|
||||
|
||||
// Choose what to do based on label
|
||||
if (f.label() > g.label()) {
|
||||
// f higher than g
|
||||
|
@ -214,6 +242,7 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
/// Return the label of this choice node.
|
||||
const L& label() const {
|
||||
return label_;
|
||||
}
|
||||
|
@ -235,33 +264,39 @@ namespace gtsam {
|
|||
branches_.push_back(node);
|
||||
}
|
||||
|
||||
/** print (as a tree) */
|
||||
void print(const std::string& s) const override {
|
||||
/// print (as a tree).
|
||||
void print(const std::string& s, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter) const override {
|
||||
std::cout << s << " Choice(";
|
||||
// std::cout << this << ",";
|
||||
std::cout << label_ << ") " << std::endl;
|
||||
std::cout << labelFormatter(label_) << ") " << std::endl;
|
||||
for (size_t i = 0; i < branches_.size(); i++)
|
||||
branches_[i]->print((boost::format("%s %d") % s % i).str());
|
||||
branches_[i]->print((boost::format("%s %d") % s % i).str(),
|
||||
labelFormatter, valueFormatter);
|
||||
}
|
||||
|
||||
/** output to graphviz (as a a graph) */
|
||||
void dot(std::ostream& os, bool showZero) const override {
|
||||
void dot(std::ostream& os, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const override {
|
||||
os << "\"" << this->id() << "\" [shape=circle, label=\"" << label_
|
||||
<< "\"]\n";
|
||||
for (size_t i = 0; i < branches_.size(); i++) {
|
||||
NodePtr branch = branches_[i];
|
||||
size_t B = branches_.size();
|
||||
for (size_t i = 0; i < B; i++) {
|
||||
const NodePtr& branch = branches_[i];
|
||||
|
||||
// Check if zero
|
||||
if (!showZero) {
|
||||
const Leaf* leaf = dynamic_cast<const Leaf*> (branch.get());
|
||||
if (leaf && !leaf->constant()) continue;
|
||||
const Leaf* leaf = dynamic_cast<const Leaf*>(branch.get());
|
||||
if (leaf && valueFormatter(leaf->constant()).compare("0")) continue;
|
||||
}
|
||||
|
||||
os << "\"" << this->id() << "\" -> \"" << branch->id() << "\"";
|
||||
if (B == 2) {
|
||||
if (i == 0) os << " [style=dashed]";
|
||||
if (i > 1) os << " [style=bold]";
|
||||
}
|
||||
os << std::endl;
|
||||
branch->dot(os, showZero);
|
||||
branch->dot(os, labelFormatter, valueFormatter, showZero);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -275,19 +310,20 @@ namespace gtsam {
|
|||
return (q.isLeaf() && q.sameLeaf(*this));
|
||||
}
|
||||
|
||||
/** equality up to tolerance */
|
||||
bool equals(const Node& q, double tol) const override {
|
||||
const Choice* other = dynamic_cast<const Choice*> (&q);
|
||||
/// equality
|
||||
bool equals(const Node& q, const CompareFunc& compare) const override {
|
||||
const Choice* other = dynamic_cast<const Choice*>(&q);
|
||||
if (!other) return false;
|
||||
if (this->label_ != other->label_) return false;
|
||||
if (branches_.size() != other->branches_.size()) return false;
|
||||
// we don't care about shared pointers being equal here
|
||||
for (size_t i = 0; i < branches_.size(); i++)
|
||||
if (!(branches_[i]->equals(*(other->branches_[i]), tol))) return false;
|
||||
if (!(branches_[i]->equals(*(other->branches_[i]), compare)))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/** evaluate */
|
||||
/// evaluate
|
||||
const Y& operator()(const Assignment<L>& x) const override {
|
||||
#ifndef NDEBUG
|
||||
typename Assignment<L>::const_iterator it = x.find(label_);
|
||||
|
@ -302,20 +338,54 @@ namespace gtsam {
|
|||
return (*child)(x);
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct from applying unary op to a Choice node
|
||||
*/
|
||||
/// Construct from applying unary op to a Choice node.
|
||||
Choice(const L& label, const Choice& f, const Unary& op) :
|
||||
label_(label), allSame_(true) {
|
||||
|
||||
branches_.reserve(f.branches_.size()); // reserve space
|
||||
for (const NodePtr& branch: f.branches_)
|
||||
for (const NodePtr& branch : f.branches_) {
|
||||
push_back(branch->apply(op));
|
||||
}
|
||||
}
|
||||
|
||||
/** apply unary operator */
|
||||
/**
|
||||
* @brief Constructor which accepts a UnaryAssignment op and the
|
||||
* corresponding assignment.
|
||||
*
|
||||
* @param label The label for this node.
|
||||
* @param f The original choice node to apply the op on.
|
||||
* @param op Function to apply on the choice node. Takes Assignment and
|
||||
* value as arguments.
|
||||
* @param assignment The Assignment that will go to op.
|
||||
*/
|
||||
Choice(const L& label, const Choice& f, const UnaryAssignment& op,
|
||||
const Assignment<L>& assignment)
|
||||
: label_(label), allSame_(true) {
|
||||
branches_.reserve(f.branches_.size()); // reserve space
|
||||
|
||||
Assignment<L> assignment_ = assignment;
|
||||
|
||||
for (size_t i = 0; i < f.branches_.size(); i++) {
|
||||
assignment_[label_] = i; // Set assignment for label to i
|
||||
|
||||
const NodePtr branch = f.branches_[i];
|
||||
push_back(branch->apply(op, assignment_));
|
||||
|
||||
// Remove the assignment so we are backtracking
|
||||
auto assignment_it = assignment_.find(label_);
|
||||
assignment_.erase(assignment_it);
|
||||
}
|
||||
}
|
||||
|
||||
/// apply unary operator.
|
||||
NodePtr apply(const Unary& op) const override {
|
||||
boost::shared_ptr<Choice> r(new Choice(label_, *this, op));
|
||||
auto r = boost::make_shared<Choice>(label_, *this, op);
|
||||
return Unique(r);
|
||||
}
|
||||
|
||||
/// Apply unary operator with assignment
|
||||
NodePtr apply(const UnaryAssignment& op,
|
||||
const Assignment<L>& assignment) const override {
|
||||
auto r = boost::make_shared<Choice>(label_, *this, op, assignment);
|
||||
return Unique(r);
|
||||
}
|
||||
|
||||
|
@ -330,44 +400,42 @@ namespace gtsam {
|
|||
|
||||
// If second argument of binary op is Leaf node, recurse on branches
|
||||
NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
|
||||
boost::shared_ptr<Choice> h(new Choice(label(), nrChoices()));
|
||||
for(NodePtr branch: branches_)
|
||||
auto h = boost::make_shared<Choice>(label(), nrChoices());
|
||||
for (auto&& branch : branches_)
|
||||
h->push_back(fL.apply_f_op_g(*branch, op));
|
||||
return Unique(h);
|
||||
}
|
||||
|
||||
// If second argument of binary op is Choice, call constructor
|
||||
NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const override {
|
||||
boost::shared_ptr<Choice> h(new Choice(fC, *this, op));
|
||||
auto h = boost::make_shared<Choice>(fC, *this, op);
|
||||
return Unique(h);
|
||||
}
|
||||
|
||||
// If second argument of binary op is Leaf
|
||||
template<typename OP>
|
||||
NodePtr apply_fC_op_gL(const Leaf& gL, OP op) const {
|
||||
boost::shared_ptr<Choice> h(new Choice(label(), nrChoices()));
|
||||
for(const NodePtr& branch: branches_)
|
||||
auto h = boost::make_shared<Choice>(label(), nrChoices());
|
||||
for (auto&& branch : branches_)
|
||||
h->push_back(branch->apply_f_op_g(gL, op));
|
||||
return Unique(h);
|
||||
}
|
||||
|
||||
/** choose a branch, recursively */
|
||||
NodePtr choose(const L& label, size_t index) const override {
|
||||
if (label_ == label)
|
||||
return branches_[index]; // choose branch
|
||||
if (label_ == label) return branches_[index]; // choose branch
|
||||
|
||||
// second case, not label of interest, just recurse
|
||||
boost::shared_ptr<Choice> r(new Choice(label_, branches_.size()));
|
||||
for(const NodePtr& branch: branches_)
|
||||
auto r = boost::make_shared<Choice>(label_, branches_.size());
|
||||
for (auto&& branch : branches_)
|
||||
r->push_back(branch->choose(label, index));
|
||||
return Unique(r);
|
||||
}
|
||||
|
||||
}; // Choice
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// DecisionTree
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree() {
|
||||
}
|
||||
|
@ -377,37 +445,36 @@ namespace gtsam {
|
|||
root_(root) {
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const Y& y) {
|
||||
root_ = NodePtr(new Leaf(y));
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(//
|
||||
const L& label, const Y& y1, const Y& y2) {
|
||||
boost::shared_ptr<Choice> a(new Choice(label, 2));
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const L& label, const Y& y1, const Y& y2) {
|
||||
auto a = boost::make_shared<Choice>(label, 2);
|
||||
NodePtr l1(new Leaf(y1)), l2(new Leaf(y2));
|
||||
a->push_back(l1);
|
||||
a->push_back(l2);
|
||||
root_ = Choice::Unique(a);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(//
|
||||
const LabelC& labelC, const Y& y1, const Y& y2) {
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const LabelC& labelC, const Y& y1,
|
||||
const Y& y2) {
|
||||
if (labelC.second != 2) throw std::invalid_argument(
|
||||
"DecisionTree: binary constructor called with non-binary label");
|
||||
boost::shared_ptr<Choice> a(new Choice(labelC.first, 2));
|
||||
auto a = boost::make_shared<Choice>(labelC.first, 2);
|
||||
NodePtr l1(new Leaf(y1)), l2(new Leaf(y2));
|
||||
a->push_back(l1);
|
||||
a->push_back(l2);
|
||||
root_ = Choice::Unique(a);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const std::vector<LabelC>& labelCs,
|
||||
const std::vector<Y>& ys) {
|
||||
|
@ -415,11 +482,10 @@ namespace gtsam {
|
|||
root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end());
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const std::vector<LabelC>& labelCs,
|
||||
const std::string& table) {
|
||||
|
||||
// Convert std::string to values of type Y
|
||||
std::vector<Y> ys;
|
||||
std::istringstream iss(table);
|
||||
|
@ -430,14 +496,14 @@ namespace gtsam {
|
|||
root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end());
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
template<typename Iterator> DecisionTree<L, Y>::DecisionTree(
|
||||
Iterator begin, Iterator end, const L& label) {
|
||||
root_ = compose(begin, end, label);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y>::DecisionTree(const L& label,
|
||||
const DecisionTree& f0, const DecisionTree& f1) {
|
||||
|
@ -446,24 +512,35 @@ namespace gtsam {
|
|||
root_ = compose(functions.begin(), functions.end(), label);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
template<typename M, typename X>
|
||||
DecisionTree<L, Y>::DecisionTree(const DecisionTree<M, X>& other,
|
||||
const std::map<M, L>& map, std::function<Y(const X&)> op) {
|
||||
root_ = convert(other.root_, map, op);
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
template <typename X, typename Func>
|
||||
DecisionTree<L, Y>::DecisionTree(const DecisionTree<L, X>& other,
|
||||
Func Y_of_X) {
|
||||
// Define functor for identity mapping of node label.
|
||||
auto L_of_L = [](const L& label) { return label; };
|
||||
root_ = convertFrom<L, X>(other.root_, L_of_L, Y_of_X);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
// Called by two constructors above.
|
||||
// Takes a label and a corresponding range of decision trees, and creates a new
|
||||
// decision tree. However, the order of the labels needs to be respected, so we
|
||||
// cannot just create a root Choice node on the label: if the label is not the
|
||||
// highest label, we need to do a complicated and expensive recursive call.
|
||||
template<typename L, typename Y> template<typename Iterator>
|
||||
typename DecisionTree<L, Y>::NodePtr DecisionTree<L, Y>::compose(Iterator begin,
|
||||
Iterator end, const L& label) const {
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
template <typename M, typename X, typename Func>
|
||||
DecisionTree<L, Y>::DecisionTree(const DecisionTree<M, X>& other,
|
||||
const std::map<M, L>& map, Func Y_of_X) {
|
||||
auto L_of_M = [&map](const M& label) -> L { return map.at(label); };
|
||||
root_ = convertFrom<M, X>(other.root_, L_of_M, Y_of_X);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
// Called by two constructors above.
|
||||
// Takes a label and a corresponding range of decision trees, and creates a
|
||||
// new decision tree. However, the order of the labels needs to be respected,
|
||||
// so we cannot just create a root Choice node on the label: if the label is
|
||||
// not the highest label, we need a complicated/ expensive recursive call.
|
||||
template <typename L, typename Y>
|
||||
template <typename Iterator>
|
||||
typename DecisionTree<L, Y>::NodePtr DecisionTree<L, Y>::compose(
|
||||
Iterator begin, Iterator end, const L& label) const {
|
||||
// find highest label among branches
|
||||
boost::optional<L> highestLabel;
|
||||
size_t nrChoices = 0;
|
||||
|
@ -480,13 +557,14 @@ namespace gtsam {
|
|||
|
||||
// if label is already in correct order, just put together a choice on label
|
||||
if (!nrChoices || !highestLabel || label > *highestLabel) {
|
||||
boost::shared_ptr<Choice> choiceOnLabel(new Choice(label, end - begin));
|
||||
auto choiceOnLabel = boost::make_shared<Choice>(label, end - begin);
|
||||
for (Iterator it = begin; it != end; it++)
|
||||
choiceOnLabel->push_back(it->root_);
|
||||
return Choice::Unique(choiceOnLabel);
|
||||
} else {
|
||||
// Set up a new choice on the highest label
|
||||
boost::shared_ptr<Choice> choiceOnHighestLabel(new Choice(*highestLabel, nrChoices));
|
||||
auto choiceOnHighestLabel =
|
||||
boost::make_shared<Choice>(*highestLabel, nrChoices);
|
||||
// now, for all possible values of highestLabel
|
||||
for (size_t index = 0; index < nrChoices; index++) {
|
||||
// make a new set of functions for composing by iterating over the given
|
||||
|
@ -505,7 +583,7 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// "create" is a bit of a complicated thing, but very useful.
|
||||
// It takes a range of labels and a corresponding range of values,
|
||||
// and creates a decision tree, as follows:
|
||||
|
@ -530,7 +608,6 @@ namespace gtsam {
|
|||
template<typename It, typename ValueIt>
|
||||
typename DecisionTree<L, Y>::NodePtr DecisionTree<L, Y>::create(
|
||||
It begin, It end, ValueIt beginY, ValueIt endY) const {
|
||||
|
||||
// get crucial counts
|
||||
size_t nrChoices = begin->second;
|
||||
size_t size = endY - beginY;
|
||||
|
@ -542,10 +619,14 @@ namespace gtsam {
|
|||
// Create a simple choice node with values as leaves.
|
||||
if (size != nrChoices) {
|
||||
std::cout << "Trying to create DD on " << begin->first << std::endl;
|
||||
std::cout << boost::format("DecisionTree::create: expected %d values but got %d instead") % nrChoices % size << std::endl;
|
||||
std::cout << boost::format(
|
||||
"DecisionTree::create: expected %d values but got %d "
|
||||
"instead") %
|
||||
nrChoices % size
|
||||
<< std::endl;
|
||||
throw std::invalid_argument("DecisionTree::create invalid argument");
|
||||
}
|
||||
boost::shared_ptr<Choice> choice(new Choice(begin->first, endY - beginY));
|
||||
auto choice = boost::make_shared<Choice>(begin->first, endY - beginY);
|
||||
for (ValueIt y = beginY; y != endY; y++)
|
||||
choice->push_back(NodePtr(new Leaf(*y)));
|
||||
return Choice::Unique(choice);
|
||||
|
@ -558,56 +639,219 @@ namespace gtsam {
|
|||
size_t split = size / nrChoices;
|
||||
for (size_t i = 0; i < nrChoices; i++, beginY += split) {
|
||||
NodePtr f = create<It, ValueIt>(labelC, end, beginY, beginY + split);
|
||||
functions += DecisionTree(f);
|
||||
functions.emplace_back(f);
|
||||
}
|
||||
return compose(functions.begin(), functions.end(), begin->first);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
template<typename M, typename X>
|
||||
typename DecisionTree<L, Y>::NodePtr DecisionTree<L, Y>::convert(
|
||||
const typename DecisionTree<M, X>::NodePtr& f, const std::map<M, L>& map,
|
||||
std::function<Y(const X&)> op) {
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
template <typename M, typename X>
|
||||
typename DecisionTree<L, Y>::NodePtr DecisionTree<L, Y>::convertFrom(
|
||||
const typename DecisionTree<M, X>::NodePtr& f,
|
||||
std::function<L(const M&)> L_of_M,
|
||||
std::function<Y(const X&)> Y_of_X) const {
|
||||
using LY = DecisionTree<L, Y>;
|
||||
|
||||
typedef DecisionTree<M, X> MX;
|
||||
typedef typename MX::Leaf MXLeaf;
|
||||
typedef typename MX::Choice MXChoice;
|
||||
typedef typename MX::NodePtr MXNodePtr;
|
||||
typedef DecisionTree<L, Y> LY;
|
||||
|
||||
// ugliness below because apparently we can't have templated virtual functions
|
||||
// If leaf, apply unary conversion "op" and create a unique leaf
|
||||
const MXLeaf* leaf = dynamic_cast<const MXLeaf*> (f.get());
|
||||
if (leaf) return NodePtr(new Leaf(op(leaf->constant())));
|
||||
// Ugliness below because apparently we can't have templated virtual
|
||||
// functions.
|
||||
// If leaf, apply unary conversion "op" and create a unique leaf.
|
||||
using MXLeaf = typename DecisionTree<M, X>::Leaf;
|
||||
if (auto leaf = boost::dynamic_pointer_cast<const MXLeaf>(f)) {
|
||||
return NodePtr(new Leaf(Y_of_X(leaf->constant()), leaf->nrAssignments()));
|
||||
}
|
||||
|
||||
// Check if Choice
|
||||
boost::shared_ptr<const MXChoice> choice = boost::dynamic_pointer_cast<const MXChoice> (f);
|
||||
using MXChoice = typename DecisionTree<M, X>::Choice;
|
||||
auto choice = boost::dynamic_pointer_cast<const MXChoice>(f);
|
||||
if (!choice) throw std::invalid_argument(
|
||||
"DecisionTree::Convert: Invalid NodePtr");
|
||||
"DecisionTree::convertFrom: Invalid NodePtr");
|
||||
|
||||
// get new label
|
||||
M oldLabel = choice->label();
|
||||
L newLabel = map.at(oldLabel);
|
||||
const M oldLabel = choice->label();
|
||||
const L newLabel = L_of_M(oldLabel);
|
||||
|
||||
// put together via Shannon expansion otherwise not sorted.
|
||||
std::vector<LY> functions;
|
||||
for(const MXNodePtr& branch: choice->branches()) {
|
||||
LY converted(convert<M, X>(branch, map, op));
|
||||
functions += converted;
|
||||
for (auto&& branch : choice->branches()) {
|
||||
functions.emplace_back(convertFrom<M, X>(branch, L_of_M, Y_of_X));
|
||||
}
|
||||
return LY::compose(functions.begin(), functions.end(), newLabel);
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
bool DecisionTree<L, Y>::equals(const DecisionTree& other, double tol) const {
|
||||
return root_->equals(*other.root_, tol);
|
||||
/****************************************************************************/
|
||||
/**
|
||||
* Functor performing depth-first visit to each leaf with the leaf value as
|
||||
* the argument.
|
||||
*
|
||||
* NOTE: We differentiate between leaves and assignments. Concretely, a 3
|
||||
* binary variable tree will have 2^3=8 assignments, but based on pruning, it
|
||||
* can have less than 8 leaves. For example, if a tree has all assignment
|
||||
* values as 1, then pruning will cause the tree to have only 1 leaf yet 8
|
||||
* assignments.
|
||||
*/
|
||||
template <typename L, typename Y>
|
||||
struct Visit {
|
||||
using F = std::function<void(const Y&)>;
|
||||
explicit Visit(F f) : f(f) {} ///< Construct from folding function.
|
||||
F f; ///< folding function object.
|
||||
|
||||
/// Do a depth-first visit on the tree rooted at node.
|
||||
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) const {
|
||||
using Leaf = typename DecisionTree<L, Y>::Leaf;
|
||||
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
|
||||
return f(leaf->constant());
|
||||
|
||||
using Choice = typename DecisionTree<L, Y>::Choice;
|
||||
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
|
||||
if (!choice)
|
||||
throw std::invalid_argument("DecisionTree::Visit: Invalid NodePtr");
|
||||
for (auto&& branch : choice->branches()) (*this)(branch); // recurse!
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L, typename Y>
|
||||
template <typename Func>
|
||||
void DecisionTree<L, Y>::visit(Func f) const {
|
||||
Visit<L, Y> visit(f);
|
||||
visit(root_);
|
||||
}
|
||||
|
||||
template<typename L, typename Y>
|
||||
void DecisionTree<L, Y>::print(const std::string& s) const {
|
||||
root_->print(s);
|
||||
/****************************************************************************/
|
||||
/**
|
||||
* Functor performing depth-first visit to each leaf with the Leaf object
|
||||
* passed as an argument.
|
||||
*
|
||||
* NOTE: We differentiate between leaves and assignments. Concretely, a 3
|
||||
* binary variable tree will have 2^3=8 assignments, but based on pruning, it
|
||||
* can have <8 leaves. For example, if a tree has all assignment values as 1,
|
||||
* then pruning will cause the tree to have only 1 leaf yet 8 assignments.
|
||||
*/
|
||||
template <typename L, typename Y>
|
||||
struct VisitLeaf {
|
||||
using F = std::function<void(const typename DecisionTree<L, Y>::Leaf&)>;
|
||||
explicit VisitLeaf(F f) : f(f) {} ///< Construct from folding function.
|
||||
F f; ///< folding function object.
|
||||
|
||||
/// Do a depth-first visit on the tree rooted at node.
|
||||
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) const {
|
||||
using Leaf = typename DecisionTree<L, Y>::Leaf;
|
||||
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
|
||||
return f(*leaf);
|
||||
|
||||
using Choice = typename DecisionTree<L, Y>::Choice;
|
||||
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
|
||||
if (!choice)
|
||||
throw std::invalid_argument("DecisionTree::VisitLeaf: Invalid NodePtr");
|
||||
for (auto&& branch : choice->branches()) (*this)(branch); // recurse!
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L, typename Y>
|
||||
template <typename Func>
|
||||
void DecisionTree<L, Y>::visitLeaf(Func f) const {
|
||||
VisitLeaf<L, Y> visit(f);
|
||||
visit(root_);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
/**
|
||||
* Functor performing depth-first visit to each leaf with the leaf's
|
||||
* `Assignment<L>` and value passed as arguments.
|
||||
*
|
||||
* NOTE: Follows the same pruning semantics as `visit`.
|
||||
*/
|
||||
template <typename L, typename Y>
|
||||
struct VisitWith {
|
||||
using F = std::function<void(const Assignment<L>&, const Y&)>;
|
||||
explicit VisitWith(F f) : f(f) {} ///< Construct from folding function.
|
||||
Assignment<L> assignment; ///< Assignment, mutating through recursion.
|
||||
F f; ///< folding function object.
|
||||
|
||||
/// Do a depth-first visit on the tree rooted at node.
|
||||
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) {
|
||||
using Leaf = typename DecisionTree<L, Y>::Leaf;
|
||||
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
|
||||
return f(assignment, leaf->constant());
|
||||
|
||||
using Choice = typename DecisionTree<L, Y>::Choice;
|
||||
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
|
||||
if (!choice)
|
||||
throw std::invalid_argument("DecisionTree::VisitWith: Invalid NodePtr");
|
||||
for (size_t i = 0; i < choice->nrChoices(); i++) {
|
||||
assignment[choice->label()] = i; // Set assignment for label to i
|
||||
|
||||
(*this)(choice->branches()[i]); // recurse!
|
||||
|
||||
// Remove the choice so we are backtracking
|
||||
auto choice_it = assignment.find(choice->label());
|
||||
assignment.erase(choice_it);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename L, typename Y>
|
||||
template <typename Func>
|
||||
void DecisionTree<L, Y>::visitWith(Func f) const {
|
||||
VisitWith<L, Y> visit(f);
|
||||
visit(root_);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
size_t DecisionTree<L, Y>::nrLeaves() const {
|
||||
size_t total = 0;
|
||||
visit([&total](const Y& node) { total += 1; });
|
||||
return total;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
// fold is just done with a visit
|
||||
template <typename L, typename Y>
|
||||
template <typename Func, typename X>
|
||||
X DecisionTree<L, Y>::fold(Func f, X x0) const {
|
||||
visit([&](const Y& y) { x0 = f(y, x0); });
|
||||
return x0;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
/**
|
||||
* Get (partial) labels by performing a visit.
|
||||
*
|
||||
* This method performs a depth-first search to go to every leaf and records
|
||||
* the keys assignment which leads to that leaf. Since the tree can be pruned,
|
||||
* there might be a leaf at a lower depth which results in a partial
|
||||
* assignment (i.e. not all keys are specified).
|
||||
*
|
||||
* E.g. given a tree with 3 keys, there may be a branch where the 3rd key has
|
||||
* the same values for all the leaves. This leads to the branch being pruned
|
||||
* so we get a leaf which is arrived at by just the first 2 keys and their
|
||||
* assignments.
|
||||
*/
|
||||
template <typename L, typename Y>
|
||||
std::set<L> DecisionTree<L, Y>::labels() const {
|
||||
std::set<L> unique;
|
||||
auto f = [&](const Assignment<L>& assignment, const Y&) {
|
||||
for (auto&& kv : assignment) {
|
||||
unique.insert(kv.first);
|
||||
}
|
||||
};
|
||||
visitWith(f);
|
||||
return unique;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
bool DecisionTree<L, Y>::equals(const DecisionTree& other,
|
||||
const CompareFunc& compare) const {
|
||||
return root_->equals(*other.root_, compare);
|
||||
}
|
||||
|
||||
template <typename L, typename Y>
|
||||
void DecisionTree<L, Y>::print(const std::string& s,
|
||||
const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter) const {
|
||||
root_->print(s, labelFormatter, valueFormatter);
|
||||
}
|
||||
|
||||
template<typename L, typename Y>
|
||||
|
@ -622,13 +866,36 @@ namespace gtsam {
|
|||
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y> DecisionTree<L, Y>::apply(const Unary& op) const {
|
||||
// It is unclear what should happen if tree is empty:
|
||||
if (empty()) {
|
||||
throw std::runtime_error(
|
||||
"DecisionTree::apply(unary op) undefined for empty tree.");
|
||||
}
|
||||
return DecisionTree(root_->apply(op));
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/// Apply unary operator with assignment
|
||||
template <typename L, typename Y>
|
||||
DecisionTree<L, Y> DecisionTree<L, Y>::apply(
|
||||
const UnaryAssignment& op) const {
|
||||
// It is unclear what should happen if tree is empty:
|
||||
if (empty()) {
|
||||
throw std::runtime_error(
|
||||
"DecisionTree::apply(unary op) undefined for empty tree.");
|
||||
}
|
||||
Assignment<L> assignment;
|
||||
return DecisionTree(root_->apply(op, assignment));
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y> DecisionTree<L, Y>::apply(const DecisionTree& g,
|
||||
const Binary& op) const {
|
||||
// It is unclear what should happen if either tree is empty:
|
||||
if (empty() || g.empty()) {
|
||||
throw std::runtime_error(
|
||||
"DecisionTree::apply(binary op) undefined for empty trees.");
|
||||
}
|
||||
// apply the operaton on the root of both diagrams
|
||||
NodePtr h = root_->apply_f_op_g(*g.root_, op);
|
||||
// create a new class with the resulting root "h"
|
||||
|
@ -636,7 +903,7 @@ namespace gtsam {
|
|||
return result;
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
/****************************************************************************/
|
||||
// The way this works:
|
||||
// We have an ADT, picture it as a tree.
|
||||
// At a certain depth, we have a branch on "label".
|
||||
|
@ -656,25 +923,40 @@ namespace gtsam {
|
|||
return result;
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
template<typename L, typename Y>
|
||||
void DecisionTree<L, Y>::dot(std::ostream& os, bool showZero) const {
|
||||
/****************************************************************************/
|
||||
template <typename L, typename Y>
|
||||
void DecisionTree<L, Y>::dot(std::ostream& os,
|
||||
const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const {
|
||||
os << "digraph G {\n";
|
||||
root_->dot(os, showZero);
|
||||
root_->dot(os, labelFormatter, valueFormatter, showZero);
|
||||
os << " [ordering=out]}" << std::endl;
|
||||
}
|
||||
|
||||
template<typename L, typename Y>
|
||||
void DecisionTree<L, Y>::dot(const std::string& name, bool showZero) const {
|
||||
template <typename L, typename Y>
|
||||
void DecisionTree<L, Y>::dot(const std::string& name,
|
||||
const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const {
|
||||
std::ofstream os((name + ".dot").c_str());
|
||||
dot(os, showZero);
|
||||
int result = system(
|
||||
("dot -Tpdf " + name + ".dot -o " + name + ".pdf >& /dev/null").c_str());
|
||||
if (result==-1) throw std::runtime_error("DecisionTree::dot system call failed");
|
||||
}
|
||||
dot(os, labelFormatter, valueFormatter, showZero);
|
||||
int result =
|
||||
system(("dot -Tpdf " + name + ".dot -o " + name + ".pdf >& /dev/null")
|
||||
.c_str());
|
||||
if (result == -1)
|
||||
throw std::runtime_error("DecisionTree::dot system call failed");
|
||||
}
|
||||
|
||||
/*********************************************************************************/
|
||||
|
||||
} // namespace gtsam
|
||||
template <typename L, typename Y>
|
||||
std::string DecisionTree<L, Y>::dot(const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const {
|
||||
std::stringstream ss;
|
||||
dot(ss, labelFormatter, valueFormatter, showZero);
|
||||
return ss.str();
|
||||
}
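For context, a hedged sketch of producing Graphviz output with explicit formatters (the lambdas below are illustrative assumptions):

// Hypothetical: render a tiny tree to a Graphviz string.
DecisionTree<std::string, int> tree("A", 1, 2);
auto labelFmt = [](const std::string& l) { return l; };
auto valueFmt = [](int v) { return std::to_string(v); };
std::string dotSrc = tree.dot(labelFmt, valueFmt, /*showZero=*/true);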
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -19,12 +19,17 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/discrete/Assignment.h>
|
||||
|
||||
#include <boost/function.hpp>
|
||||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
@ -36,24 +41,32 @@ namespace gtsam {
|
|||
*/
|
||||
template<typename L, typename Y>
|
||||
class DecisionTree {
|
||||
protected:
|
||||
/// Default method for comparison of two objects of type Y.
|
||||
static bool DefaultCompare(const Y& a, const Y& b) {
|
||||
return a == b;
|
||||
}
|
||||
|
||||
public:
|
||||
using LabelFormatter = std::function<std::string(L)>;
|
||||
using ValueFormatter = std::function<std::string(Y)>;
|
||||
using CompareFunc = std::function<bool(const Y&, const Y&)>;
|
||||
|
||||
/** Handy typedefs for unary and binary function types */
|
||||
typedef std::function<Y(const Y&)> Unary;
|
||||
typedef std::function<Y(const Y&, const Y&)> Binary;
|
||||
using Unary = std::function<Y(const Y&)>;
|
||||
using UnaryAssignment = std::function<Y(const Assignment<L>&, const Y&)>;
|
||||
using Binary = std::function<Y(const Y&, const Y&)>;
|
||||
|
||||
/** A label annotated with cardinality */
|
||||
typedef std::pair<L,size_t> LabelC;
|
||||
using LabelC = std::pair<L, size_t>;
|
||||
|
||||
/** DTs consist of Leaf and Choice nodes, both subclasses of Node */
|
||||
class Leaf;
|
||||
class Choice;
|
||||
struct Leaf;
|
||||
struct Choice;
|
||||
|
||||
/** ------------------------ Node base class --------------------------- */
|
||||
class Node {
|
||||
public:
|
||||
typedef boost::shared_ptr<const Node> Ptr;
|
||||
struct Node {
|
||||
using Ptr = boost::shared_ptr<const Node>;
|
||||
|
||||
#ifdef DT_DEBUG_MEMORY
|
||||
static int nrNodes;
|
||||
|
@ -62,14 +75,16 @@ namespace gtsam {
|
|||
// Constructor
|
||||
Node() {
|
||||
#ifdef DT_DEBUG_MEMORY
|
||||
std::cout << ++nrNodes << " constructed " << id() << std::endl; std::cout.flush();
|
||||
std::cout << ++nrNodes << " constructed " << id() << std::endl;
|
||||
std::cout.flush();
|
||||
#endif
|
||||
}
|
||||
|
||||
// Destructor
|
||||
virtual ~Node() {
|
||||
#ifdef DT_DEBUG_MEMORY
|
||||
std::cout << --nrNodes << " destructed " << id() << std::endl; std::cout.flush();
|
||||
std::cout << --nrNodes << " destructed " << id() << std::endl;
|
||||
std::cout.flush();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -77,13 +92,20 @@ namespace gtsam {
|
|||
const void* id() const { return this; }
|
||||
|
||||
// everything else is virtual, no documentation here as internal
|
||||
virtual void print(const std::string& s = "") const = 0;
|
||||
virtual void dot(std::ostream& os, bool showZero) const = 0;
|
||||
virtual void print(const std::string& s,
|
||||
const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter) const = 0;
|
||||
virtual void dot(std::ostream& os, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero) const = 0;
|
||||
virtual bool sameLeaf(const Leaf& q) const = 0;
|
||||
virtual bool sameLeaf(const Node& q) const = 0;
|
||||
virtual bool equals(const Node& other, double tol = 1e-9) const = 0;
|
||||
virtual bool equals(const Node& other, const CompareFunc& compare =
|
||||
&DefaultCompare) const = 0;
|
||||
virtual const Y& operator()(const Assignment<L>& x) const = 0;
|
||||
virtual Ptr apply(const Unary& op) const = 0;
|
||||
virtual Ptr apply(const UnaryAssignment& op,
|
||||
const Assignment<L>& assignment) const = 0;
|
||||
virtual Ptr apply_f_op_g(const Node&, const Binary&) const = 0;
|
||||
virtual Ptr apply_g_op_fL(const Leaf&, const Binary&) const = 0;
|
||||
virtual Ptr apply_g_op_fC(const Choice&, const Binary&) const = 0;
|
||||
|
@ -93,36 +115,45 @@ namespace gtsam {
|
|||
/** ------------------------ Node base class --------------------------- */
|
||||
|
||||
public:
|
||||
|
||||
/** A function is a shared pointer to the root of a DT */
|
||||
typedef typename Node::Ptr NodePtr;
|
||||
using NodePtr = typename Node::Ptr;
|
||||
|
||||
/* a DecisionTree just contains the root */
|
||||
/// A DecisionTree just contains the root. TODO(dellaert): make protected.
|
||||
NodePtr root_;
|
||||
|
||||
protected:
|
||||
|
||||
/** Internal recursive function to create from keys, cardinalities, and Y values */
|
||||
/** Internal recursive function to create from keys, cardinalities,
|
||||
* and Y values
|
||||
*/
|
||||
template<typename It, typename ValueIt>
|
||||
NodePtr create(It begin, It end, ValueIt beginY, ValueIt endY) const;
|
||||
|
||||
/** Convert to a different type */
|
||||
template<typename M, typename X> NodePtr
|
||||
convert(const typename DecisionTree<M, X>::NodePtr& f, const std::map<M,
|
||||
L>& map, std::function<Y(const X&)> op);
|
||||
|
||||
/** Default constructor */
|
||||
DecisionTree();
|
||||
/**
|
||||
* @brief Convert from a DecisionTree<M, X> to DecisionTree<L, Y>.
|
||||
*
|
||||
* @tparam M The previous label type.
|
||||
* @tparam X The previous value type.
|
||||
* @param f The node pointer to the root of the previous DecisionTree.
|
||||
* @param L_of_M Functor to convert from label type M to type L.
|
||||
* @param Y_of_X Functor to convert from value type X to type Y.
|
||||
* @return NodePtr
|
||||
*/
|
||||
template <typename M, typename X>
|
||||
NodePtr convertFrom(const typename DecisionTree<M, X>::NodePtr& f,
|
||||
std::function<L(const M&)> L_of_M,
|
||||
std::function<Y(const X&)> Y_of_X) const;
|
||||
|
||||
public:
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/** Create a constant */
|
||||
DecisionTree(const Y& y);
|
||||
/** Default constructor (for serialization) */
|
||||
DecisionTree();
|
||||
|
||||
/** Create a new leaf function splitting on a variable */
|
||||
/** Create a constant */
|
||||
explicit DecisionTree(const Y& y);
|
||||
|
||||
/// Create tree with 2 assignments `y1`, `y2`, splitting on variable `label`
|
||||
DecisionTree(const L& label, const Y& y1, const Y& y2);
|
||||
|
||||
/** Allow Label+Cardinality for convenience */
|
||||
|
@ -139,31 +170,60 @@ namespace gtsam {
|
|||
DecisionTree(Iterator begin, Iterator end, const L& label);
|
||||
|
||||
/** Create DecisionTree from two others */
|
||||
DecisionTree(const L& label, //
|
||||
const DecisionTree& f0, const DecisionTree& f1);
|
||||
DecisionTree(const L& label, const DecisionTree& f0,
|
||||
const DecisionTree& f1);
|
||||
|
||||
/** Convert from a different type */
|
||||
template<typename M, typename X>
|
||||
DecisionTree(const DecisionTree<M, X>& other,
|
||||
const std::map<M, L>& map, std::function<Y(const X&)> op);
|
||||
/**
|
||||
* @brief Convert from a different value type.
|
||||
*
|
||||
* @tparam X The previous value type.
|
||||
* @param other The DecisionTree to convert from.
|
||||
* @param Y_of_X Functor to convert from value type X to type Y.
|
||||
*/
|
||||
template <typename X, typename Func>
|
||||
DecisionTree(const DecisionTree<L, X>& other, Func Y_of_X);
|
||||
|
||||
/**
|
||||
* @brief Convert from a different value type X to value type Y, also translate
|
||||
* labels via map from type M to L.
|
||||
*
|
||||
* @tparam M Previous label type.
|
||||
* @tparam X Previous value type.
|
||||
* @param other The decision tree to convert.
|
||||
* @param L_of_M Map from label type M to type L.
|
||||
* @param Y_of_X Functor to convert from type X to type Y.
|
||||
*/
|
||||
template <typename M, typename X, typename Func>
|
||||
DecisionTree(const DecisionTree<M, X>& other, const std::map<M, L>& map,
|
||||
Func Y_of_X);
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/** GTSAM-style print */
|
||||
void print(const std::string& s = "DecisionTree") const;
|
||||
/**
|
||||
* @brief GTSAM-style print
|
||||
*
|
||||
* @param s Prefix string.
|
||||
* @param labelFormatter Functor to format the node label.
|
||||
* @param valueFormatter Functor to format the node value.
|
||||
*/
|
||||
void print(const std::string& s, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter) const;
|
||||
|
||||
// Testable
|
||||
bool equals(const DecisionTree& other, double tol = 1e-9) const;
|
||||
bool equals(const DecisionTree& other,
|
||||
const CompareFunc& compare = &DefaultCompare) const;
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/** Make virtual */
|
||||
virtual ~DecisionTree() {
|
||||
}
|
||||
/// Make virtual
|
||||
virtual ~DecisionTree() {}
|
||||
|
||||
/// Check if tree is empty.
|
||||
bool empty() const { return !root_; }
|
||||
|
||||
/** equality */
|
||||
bool operator==(const DecisionTree& q) const;
|
||||
|
@ -171,9 +231,94 @@ namespace gtsam {
|
|||
/** evaluate */
|
||||
const Y& operator()(const Assignment<L>& x) const;
|
||||
|
||||
/**
|
||||
* @brief Visit all leaves in depth-first fashion.
|
||||
*
|
||||
* @param f (side-effect) Function taking a value.
|
||||
*
|
||||
* @note Due to pruning, the number of leaves may not be the same as the
|
||||
* number of assignments. E.g. if we have a tree on 2 binary variables with
|
||||
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
|
||||
*
|
||||
* Example:
|
||||
* int sum = 0;
|
||||
* auto visitor = [&](int y) { sum += y; };
|
||||
* tree.visit(visitor);
|
||||
*/
|
||||
template <typename Func>
|
||||
void visit(Func f) const;
|
||||
|
||||
/**
|
||||
* @brief Visit all leaves in depth-first fashion.
|
||||
*
|
||||
* @param f (side-effect) Function taking the leaf node pointer.
|
||||
*
|
||||
* @note Due to pruning, the number of leaves may not be the same as the
|
||||
* number of assignments. E.g. if we have a tree on 2 binary variables with
|
||||
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
|
||||
*
|
||||
* Example:
|
||||
* int sum = 0;
|
||||
* auto visitor = [&](const Leaf& leaf) { sum += leaf.constant(); };
|
||||
* tree.visitLeaf(visitor);
|
||||
*/
|
||||
template <typename Func>
|
||||
void visitLeaf(Func f) const;
|
||||
|
||||
/**
|
||||
* @brief Visit all leaves in depth-first fashion.
|
||||
*
|
||||
* @param f (side-effect) Function taking an assignment and a value.
|
||||
*
|
||||
* @note Due to pruning, the number of leaves may not be the same as the
|
||||
* number of assignments. E.g. if we have a tree on 2 binary variables with
|
||||
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
|
||||
*
|
||||
* Example:
|
||||
* int sum = 0;
|
||||
* auto visitor = [&](const Assignment<L>& assignment, int y) { sum += y; };
|
||||
* tree.visitWith(visitor);
|
||||
*/
|
||||
template <typename Func>
|
||||
void visitWith(Func f) const;
|
||||
|
||||
/// Return the number of leaves in the tree.
|
||||
size_t nrLeaves() const;
|
||||
|
||||
/**
|
||||
* @brief Fold a binary function over the tree, returning accumulator.
|
||||
*
|
||||
* @tparam X type for accumulator.
|
||||
* @param f binary function: Y * X -> X returning an updated accumulator.
|
||||
* @param x0 initial value for accumulator.
|
||||
* @return X final value for accumulator.
|
||||
*
|
||||
* @note X is always passed by value.
|
||||
* @note Due to pruning, leaves might not exhaust choices.
|
||||
*
|
||||
* Example:
|
||||
* auto add = [](const double& y, double x) { return y + x; };
|
||||
* double sum = tree.fold(add, 0.0);
|
||||
*/
|
||||
template <typename Func, typename X>
|
||||
X fold(Func f, X x0) const;
|
||||
|
||||
/** Retrieve all unique labels as a set. */
|
||||
std::set<L> labels() const;
|
||||
|
||||
/** apply Unary operation "op" to f */
|
||||
DecisionTree apply(const Unary& op) const;
|
||||
|
||||
/**
|
||||
* @brief Apply Unary operation "op" to f while also providing the
|
||||
* corresponding assignment.
|
||||
*
|
||||
* @param op Function which takes Assignment<L> and Y as input and returns
|
||||
* object of type Y.
|
||||
* @return DecisionTree
|
||||
*/
|
||||
DecisionTree apply(const UnaryAssignment& op) const;
|
||||
|
||||
/** apply binary operation "op" to f and g */
|
||||
DecisionTree apply(const DecisionTree& g, const Binary& op) const;
|
||||
|
||||
|
@ -185,7 +330,8 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** combine subtrees on key with binary operation "op" */
|
||||
DecisionTree combine(const L& label, size_t cardinality, const Binary& op) const;
|
||||
DecisionTree combine(const L& label, size_t cardinality,
|
||||
const Binary& op) const;
|
||||
|
||||
/** combine with LabelC for convenience */
|
||||
DecisionTree combine(const LabelC& labelC, const Binary& op) const {
|
||||
|
@ -193,38 +339,68 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** output to graphviz format, stream version */
|
||||
void dot(std::ostream& os, bool showZero = true) const;
|
||||
void dot(std::ostream& os, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter, bool showZero = true) const;
|
||||
|
||||
/** output to graphviz format, open a file */
|
||||
void dot(const std::string& name, bool showZero = true) const;
|
||||
void dot(const std::string& name, const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter, bool showZero = true) const;
|
||||
|
||||
/** output to graphviz format string */
|
||||
std::string dot(const LabelFormatter& labelFormatter,
|
||||
const ValueFormatter& valueFormatter,
|
||||
bool showZero = true) const;
|
||||
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
||||
// internal use only
|
||||
DecisionTree(const NodePtr& root);
|
||||
explicit DecisionTree(const NodePtr& root);
|
||||
|
||||
// internal use only
|
||||
template<typename Iterator> NodePtr
|
||||
compose(Iterator begin, Iterator end, const L& label) const;
|
||||
|
||||
/// @}
|
||||
|
||||
}; // DecisionTree
|
||||
|
||||
/** free versions of apply */
|
||||
|
||||
template<typename Y, typename L>
|
||||
/// Apply unary operator `op` to DecisionTree `f`.
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y> apply(const DecisionTree<L, Y>& f,
|
||||
const typename DecisionTree<L, Y>::Unary& op) {
|
||||
return f.apply(op);
|
||||
}
|
||||
|
||||
template<typename Y, typename L>
|
||||
/// Apply unary operator `op` with Assignment to DecisionTree `f`.
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y> apply(const DecisionTree<L, Y>& f,
|
||||
const typename DecisionTree<L, Y>::UnaryAssignment& op) {
|
||||
return f.apply(op);
|
||||
}
|
||||
|
||||
/// Apply binary operator `op` to DecisionTree `f`.
|
||||
template<typename L, typename Y>
|
||||
DecisionTree<L, Y> apply(const DecisionTree<L, Y>& f,
|
||||
const DecisionTree<L, Y>& g,
|
||||
const typename DecisionTree<L, Y>::Binary& op) {
|
||||
return f.apply(g, op);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief unzip a DecisionTree with `std::pair` values.
|
||||
*
|
||||
* @param input the DecisionTree with `(T1,T2)` values.
|
||||
* @return a pair of DecisionTrees over T1 and T2, respectively.
|
||||
*/
|
||||
template <typename L, typename T1, typename T2>
|
||||
std::pair<DecisionTree<L, T1>, DecisionTree<L, T2> > unzip(
|
||||
const DecisionTree<L, std::pair<T1, T2> >& input) {
|
||||
return std::make_pair(
|
||||
DecisionTree<L, T1>(input, [](std::pair<T1, T2> i) { return i.first; }),
|
||||
DecisionTree<L, T2>(input,
|
||||
[](std::pair<T1, T2> i) { return i.second; }));
|
||||
}
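A brief usage sketch for unzip, with assumed label/value types:

// Hypothetical: split a tree of (count, weight) pairs into two aligned trees.
DecisionTree<std::string, std::pair<int, double>> pairs("A", {1, 0.1}, {2, 0.9});
auto split = unzip(pairs);
DecisionTree<std::string, int> counts = split.first;      // leaves 1 and 2
DecisionTree<std::string, double> weights = split.second; // leaves 0.1 and 0.9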
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -17,74 +17,90 @@
|
|||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/base/FastSet.h>
|
||||
#include <gtsam/discrete/DecisionTreeFactor.h>
|
||||
#include <gtsam/discrete/DiscreteConditional.h>
|
||||
#include <gtsam/base/FastSet.h>
|
||||
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <boost/format.hpp>
|
||||
#include <utility>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ******************************************************************************** */
|
||||
DecisionTreeFactor::DecisionTreeFactor() {
|
||||
}
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::DecisionTreeFactor() {}
|
||||
|
||||
/* ******************************************************************************** */
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::DecisionTreeFactor(const DiscreteKeys& keys,
|
||||
const ADT& potentials) :
|
||||
DiscreteFactor(keys.indices()), Potentials(keys, potentials) {
|
||||
}
|
||||
const ADT& potentials)
|
||||
: DiscreteFactor(keys.indices()),
|
||||
ADT(potentials),
|
||||
cardinalities_(keys.cardinalities()) {}
|
||||
|
||||
/* *************************************************************************/
|
||||
DecisionTreeFactor::DecisionTreeFactor(const DiscreteConditional& c) :
|
||||
DiscreteFactor(c.keys()), Potentials(c) {
|
||||
}
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::DecisionTreeFactor(const DiscreteConditional& c)
|
||||
: DiscreteFactor(c.keys()),
|
||||
AlgebraicDecisionTree<Key>(c),
|
||||
cardinalities_(c.cardinalities_) {}
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool DecisionTreeFactor::equals(const DiscreteFactor& other, double tol) const {
|
||||
if(!dynamic_cast<const DecisionTreeFactor*>(&other)) {
|
||||
/* ************************************************************************ */
|
||||
bool DecisionTreeFactor::equals(const DiscreteFactor& other,
|
||||
double tol) const {
|
||||
if (!dynamic_cast<const DecisionTreeFactor*>(&other)) {
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
const DecisionTreeFactor& f(static_cast<const DecisionTreeFactor&>(other));
|
||||
return Potentials::equals(f, tol);
|
||||
} else {
|
||||
const auto& f(static_cast<const DecisionTreeFactor&>(other));
|
||||
return ADT::equals(f, tol);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/* ************************************************************************ */
|
||||
double DecisionTreeFactor::safe_div(const double& a, const double& b) {
|
||||
// safe_div is used when we divide the product factor by the sum
|
||||
// factor. If the product or sum is zero, we accord zero probability to the
|
||||
// event.
|
||||
return (a == 0 || b == 0) ? 0 : (a / b);
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
void DecisionTreeFactor::print(const string& s,
|
||||
const KeyFormatter& formatter) const {
|
||||
cout << s;
|
||||
Potentials::print("Potentials:",formatter);
|
||||
cout << " f[";
|
||||
for (auto&& key : keys())
|
||||
cout << boost::format(" (%1%,%2%),") % formatter(key) % cardinality(key);
|
||||
cout << " ]" << endl;
|
||||
ADT::print("", formatter);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor DecisionTreeFactor::apply(const DecisionTreeFactor& f,
|
||||
ADT::Binary op) const {
|
||||
map<Key,size_t> cs; // new cardinalities
|
||||
map<Key, size_t> cs; // new cardinalities
|
||||
// make unique key-cardinality map
|
||||
for(Key j: keys()) cs[j] = cardinality(j);
|
||||
for(Key j: f.keys()) cs[j] = f.cardinality(j);
|
||||
for (Key j : keys()) cs[j] = cardinality(j);
|
||||
for (Key j : f.keys()) cs[j] = f.cardinality(j);
|
||||
// Convert map into keys
|
||||
DiscreteKeys keys;
|
||||
for(const std::pair<const Key,size_t>& key: cs)
|
||||
keys.push_back(key);
|
||||
for (const std::pair<const Key, size_t>& key : cs) keys.push_back(key);
|
||||
// apply operand
|
||||
ADT result = ADT::apply(f, op);
|
||||
// Make a new factor
|
||||
return DecisionTreeFactor(keys, result);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine(size_t nrFrontals,
|
||||
ADT::Binary op) const {
|
||||
|
||||
if (nrFrontals > size()) throw invalid_argument(
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine(
|
||||
size_t nrFrontals, ADT::Binary op) const {
|
||||
if (nrFrontals > size())
|
||||
throw invalid_argument(
|
||||
(boost::format(
|
||||
"DecisionTreeFactor::combine: invalid number of frontal keys %d, nr.keys=%d")
|
||||
% nrFrontals % size()).str());
|
||||
"DecisionTreeFactor::combine: invalid number of frontal "
|
||||
"keys %d, nr.keys=%d") %
|
||||
nrFrontals % size())
|
||||
.str());
|
||||
|
||||
// sum over nrFrontals keys
|
||||
size_t i;
|
||||
|
@ -98,20 +114,21 @@ namespace gtsam {
|
|||
DiscreteKeys dkeys;
|
||||
for (; i < keys().size(); i++) {
|
||||
Key j = keys()[i];
|
||||
dkeys.push_back(DiscreteKey(j,cardinality(j)));
|
||||
dkeys.push_back(DiscreteKey(j, cardinality(j)));
|
||||
}
|
||||
return boost::make_shared<DecisionTreeFactor>(dkeys, result);
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine(const Ordering& frontalKeys,
|
||||
ADT::Binary op) const {
|
||||
|
||||
if (frontalKeys.size() > size()) throw invalid_argument(
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::shared_ptr DecisionTreeFactor::combine(
|
||||
const Ordering& frontalKeys, ADT::Binary op) const {
|
||||
if (frontalKeys.size() > size())
|
||||
throw invalid_argument(
|
||||
(boost::format(
|
||||
"DecisionTreeFactor::combine: invalid number of frontal keys %d, nr.keys=%d")
|
||||
% frontalKeys.size() % size()).str());
|
||||
"DecisionTreeFactor::combine: invalid number of frontal "
|
||||
"keys %d, nr.keys=%d") %
|
||||
frontalKeys.size() % size())
|
||||
.str());
|
||||
|
||||
// sum over nrFrontals keys
|
||||
size_t i;
|
||||
|
@ -122,17 +139,190 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
// create new factor, note we collect keys that are not in frontalKeys
|
||||
// TODO: why do we need this??? result should contain correct keys!!!
|
||||
// TODO(frank): why do we need this??? result should contain correct keys!!!
|
||||
DiscreteKeys dkeys;
|
||||
for (i = 0; i < keys().size(); i++) {
|
||||
Key j = keys()[i];
|
||||
// TODO: inefficient!
|
||||
if (std::find(frontalKeys.begin(), frontalKeys.end(), j) != frontalKeys.end())
|
||||
// TODO(frank): inefficient!
|
||||
if (std::find(frontalKeys.begin(), frontalKeys.end(), j) !=
|
||||
frontalKeys.end())
|
||||
continue;
|
||||
dkeys.push_back(DiscreteKey(j,cardinality(j)));
|
||||
dkeys.push_back(DiscreteKey(j, cardinality(j)));
|
||||
}
|
||||
return boost::make_shared<DecisionTreeFactor>(dkeys, result);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/* ************************************************************************ */
|
||||
std::vector<std::pair<DiscreteValues, double>> DecisionTreeFactor::enumerate()
|
||||
const {
|
||||
// Get all possible assignments
|
||||
std::vector<std::pair<Key, size_t>> pairs = discreteKeys();
|
||||
// Reverse to make cartesian product output a more natural ordering.
|
||||
std::vector<std::pair<Key, size_t>> rpairs(pairs.rbegin(), pairs.rend());
|
||||
const auto assignments = DiscreteValues::CartesianProduct(rpairs);
|
||||
|
||||
// Construct unordered_map with values
|
||||
std::vector<std::pair<DiscreteValues, double>> result;
|
||||
for (const auto& assignment : assignments) {
|
||||
result.emplace_back(assignment, operator()(assignment));
|
||||
}
|
||||
return result;
|
||||
}
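As an illustrative (assumed) usage of enumerate(), printing every assignment/value pair of a one-key factor:

// Hypothetical: a single binary key with values 0.4 / 0.6.
DiscreteKey A(0, 2);
DecisionTreeFactor f(A, "0.4 0.6");
for (const auto& kv : f.enumerate())
  std::cout << kv.first.at(0) << " -> " << kv.second << std::endl;  // needs <iostream>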
|
||||
|
||||
/* ************************************************************************ */
|
||||
DiscreteKeys DecisionTreeFactor::discreteKeys() const {
|
||||
DiscreteKeys result;
|
||||
for (auto&& key : keys()) {
|
||||
DiscreteKey dkey(key, cardinality(key));
|
||||
if (std::find(result.begin(), result.end(), dkey) == result.end()) {
|
||||
result.push_back(dkey);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
static std::string valueFormatter(const double& v) {
|
||||
return (boost::format("%4.2g") % v).str();
|
||||
}
|
||||
|
||||
/** output to graphviz format, stream version */
|
||||
void DecisionTreeFactor::dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter,
|
||||
bool showZero) const {
|
||||
ADT::dot(os, keyFormatter, valueFormatter, showZero);
|
||||
}
|
||||
|
||||
/** output to graphviz format, open a file */
|
||||
void DecisionTreeFactor::dot(const std::string& name,
|
||||
const KeyFormatter& keyFormatter,
|
||||
bool showZero) const {
|
||||
ADT::dot(name, keyFormatter, valueFormatter, showZero);
|
||||
}
|
||||
|
||||
/** output to graphviz format string */
|
||||
std::string DecisionTreeFactor::dot(const KeyFormatter& keyFormatter,
|
||||
bool showZero) const {
|
||||
return ADT::dot(keyFormatter, valueFormatter, showZero);
|
||||
}
|
||||
|
||||
// Print out header.
|
||||
/* ************************************************************************ */
|
||||
string DecisionTreeFactor::markdown(const KeyFormatter& keyFormatter,
|
||||
const Names& names) const {
|
||||
stringstream ss;
|
||||
|
||||
// Print out header.
|
||||
ss << "|";
|
||||
for (auto& key : keys()) {
|
||||
ss << keyFormatter(key) << "|";
|
||||
}
|
||||
ss << "value|\n";
|
||||
|
||||
// Print out separator with alignment hints.
|
||||
ss << "|";
|
||||
for (size_t j = 0; j < size(); j++) ss << ":-:|";
|
||||
ss << ":-:|\n";
|
||||
|
||||
// Print out all rows.
|
||||
auto rows = enumerate();
|
||||
for (const auto& kv : rows) {
|
||||
ss << "|";
|
||||
auto assignment = kv.first;
|
||||
for (auto& key : keys()) {
|
||||
size_t index = assignment.at(key);
|
||||
ss << DiscreteValues::Translate(names, key, index) << "|";
|
||||
}
|
||||
ss << kv.second << "|\n";
|
||||
}
|
||||
return ss.str();
|
||||
}
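A hedged example of the markdown renderer on a small factor (default key formatter, no category names; all values assumed):

// Hypothetical: render a unary factor as a markdown table.
DiscreteKey A(0, 2);
DecisionTreeFactor f(A, "0.7 0.3");
std::string table = f.markdown();  // header row, alignment row, then one row per assignment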
|
||||
|
||||
/* ************************************************************************ */
|
||||
string DecisionTreeFactor::html(const KeyFormatter& keyFormatter,
|
||||
const Names& names) const {
|
||||
stringstream ss;
|
||||
|
||||
// Print out preamble.
|
||||
ss << "<div>\n<table class='DecisionTreeFactor'>\n <thead>\n";
|
||||
|
||||
// Print out header row.
|
||||
ss << " <tr>";
|
||||
for (auto& key : keys()) {
|
||||
ss << "<th>" << keyFormatter(key) << "</th>";
|
||||
}
|
||||
ss << "<th>value</th></tr>\n";
|
||||
|
||||
// Finish header and start body.
|
||||
ss << " </thead>\n <tbody>\n";
|
||||
|
||||
// Print out all rows.
|
||||
auto rows = enumerate();
|
||||
for (const auto& kv : rows) {
|
||||
ss << " <tr>";
|
||||
auto assignment = kv.first;
|
||||
for (auto& key : keys()) {
|
||||
size_t index = assignment.at(key);
|
||||
ss << "<th>" << DiscreteValues::Translate(names, key, index) << "</th>";
|
||||
}
|
||||
ss << "<td>" << kv.second << "</td>"; // value
|
||||
ss << "</tr>\n";
|
||||
}
|
||||
ss << " </tbody>\n</table>\n</div>";
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::DecisionTreeFactor(const DiscreteKeys& keys,
|
||||
const vector<double>& table)
|
||||
: DiscreteFactor(keys.indices()),
|
||||
AlgebraicDecisionTree<Key>(keys, table),
|
||||
cardinalities_(keys.cardinalities()) {}
|
||||
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor::DecisionTreeFactor(const DiscreteKeys& keys,
|
||||
const string& table)
|
||||
: DiscreteFactor(keys.indices()),
|
||||
AlgebraicDecisionTree<Key>(keys, table),
|
||||
cardinalities_(keys.cardinalities()) {}
|
||||
|
||||
/* ************************************************************************ */
|
||||
DecisionTreeFactor DecisionTreeFactor::prune(size_t maxNrAssignments) const {
|
||||
const size_t N = maxNrAssignments;
|
||||
|
||||
// Get the probabilities in the decision tree so we can threshold.
|
||||
std::vector<double> probabilities;
|
||||
this->visitLeaf([&](const Leaf& leaf) {
|
||||
size_t nrAssignments = leaf.nrAssignments();
|
||||
double prob = leaf.constant();
|
||||
probabilities.insert(probabilities.end(), nrAssignments, prob);
|
||||
});
|
||||
|
||||
// The number of probabilities can be lower than max_leaves
|
||||
if (probabilities.size() <= N) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
std::sort(probabilities.begin(), probabilities.end(),
|
||||
std::greater<double>{});
|
||||
|
||||
double threshold = probabilities[N - 1];
|
||||
|
||||
// Now threshold the decision tree
|
||||
size_t total = 0;
|
||||
auto thresholdFunc = [threshold, &total, N](const double& value) {
|
||||
if (value < threshold || total >= N) {
|
||||
return 0.0;
|
||||
} else {
|
||||
total += 1;
|
||||
return value;
|
||||
}
|
||||
};
|
||||
DecisionTree<Key, double> thresholded(*this, thresholdFunc);
|
||||
|
||||
// Create pruned decision tree factor and return.
|
||||
return DecisionTreeFactor(this->discreteKeys(), thresholded);
|
||||
}
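A short usage sketch for prune(), with keys and probabilities chosen for illustration:

// Hypothetical: keep only the top-2 assignments, zeroing the rest.
DiscreteKey A(0, 2), B(1, 2);
DiscreteKeys keys;
keys.push_back(A);
keys.push_back(B);
DecisionTreeFactor f(keys, "0.1 0.2 0.3 0.4");
DecisionTreeFactor pruned = f.prune(2);  // assignments outside the top 2 become 0.0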
|
||||
|
||||
/* ************************************************************************ */
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -18,15 +18,18 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/discrete/AlgebraicDecisionTree.h>
|
||||
#include <gtsam/discrete/DiscreteFactor.h>
|
||||
#include <gtsam/discrete/Potentials.h>
|
||||
#include <gtsam/discrete/DiscreteKey.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <vector>
|
||||
#include <exception>
|
||||
#include <map>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -35,34 +38,46 @@ namespace gtsam {
|
|||
/**
|
||||
* A discrete probabilistic factor
|
||||
*/
|
||||
class GTSAM_EXPORT DecisionTreeFactor: public DiscreteFactor, public Potentials {
|
||||
|
||||
class GTSAM_EXPORT DecisionTreeFactor : public DiscreteFactor,
|
||||
public AlgebraicDecisionTree<Key> {
|
||||
public:
|
||||
|
||||
// typedefs needed to play nice with gtsam
|
||||
typedef DecisionTreeFactor This;
|
||||
typedef DiscreteFactor Base; ///< Typedef to base class
|
||||
typedef boost::shared_ptr<DecisionTreeFactor> shared_ptr;
|
||||
typedef AlgebraicDecisionTree<Key> ADT;
|
||||
|
||||
protected:
|
||||
std::map<Key, size_t> cardinalities_;
|
||||
|
||||
public:
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/** Default constructor for I/O */
|
||||
DecisionTreeFactor();
|
||||
|
||||
/** Constructor from Indices, Ordering, and AlgebraicDecisionDiagram */
|
||||
/** Constructor from DiscreteKeys and AlgebraicDecisionTree */
|
||||
DecisionTreeFactor(const DiscreteKeys& keys, const ADT& potentials);
|
||||
|
||||
/** Constructor from Indices and (string or doubles) */
|
||||
template<class SOURCE>
|
||||
DecisionTreeFactor(const DiscreteKeys& keys, SOURCE table) :
|
||||
DiscreteFactor(keys.indices()), Potentials(keys, table) {
|
||||
}
|
||||
/** Constructor from doubles */
|
||||
DecisionTreeFactor(const DiscreteKeys& keys,
|
||||
const std::vector<double>& table);
|
||||
|
||||
/** Constructor from string */
|
||||
DecisionTreeFactor(const DiscreteKeys& keys, const std::string& table);
|
||||
|
||||
/// Single-key specialization
|
||||
template <class SOURCE>
|
||||
DecisionTreeFactor(const DiscreteKey& key, SOURCE table)
|
||||
: DecisionTreeFactor(DiscreteKeys{key}, table) {}
|
||||
|
||||
/// Single-key specialization, with vector of doubles.
|
||||
DecisionTreeFactor(const DiscreteKey& key, const std::vector<double>& row)
|
||||
: DecisionTreeFactor(DiscreteKeys{key}, row) {}
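For clarity, a hedged example of the single-key constructors declared above (the key index and values are assumptions):

// Hypothetical: a unary factor on a ternary variable with key 5.
DiscreteKey X(5, 3);
DecisionTreeFactor fromVector(X, std::vector<double>{0.2, 0.5, 0.3});
DecisionTreeFactor fromString(X, "0.2 0.5 0.3");  // same values, parsed from a string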
|
||||
|
||||
/** Construct from a DiscreteConditional type */
|
||||
DecisionTreeFactor(const DiscreteConditional& c);
|
||||
explicit DecisionTreeFactor(const DiscreteConditional& c);
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
|
@ -72,7 +87,8 @@ namespace gtsam {
|
|||
bool equals(const DiscreteFactor& other, double tol = 1e-9) const override;
|
||||
|
||||
// print
|
||||
void print(const std::string& s = "DecisionTreeFactor:\n",
|
||||
void print(
|
||||
const std::string& s = "DecisionTreeFactor:\n",
|
||||
const KeyFormatter& formatter = DefaultKeyFormatter) const override;
|
||||
|
||||
/// @}
|
||||
|
@ -80,8 +96,8 @@ namespace gtsam {
|
|||
/// @{
|
||||
|
||||
/// Value is just a lookup in the AlgebraicDecisionTree.
|
||||
double operator()(const Values& values) const override {
|
||||
return Potentials::operator()(values);
|
||||
double operator()(const DiscreteValues& values) const override {
|
||||
return ADT::operator()(values);
|
||||
}
|
||||
|
||||
/// multiply two factors
|
||||
|
@ -89,15 +105,17 @@ namespace gtsam {
|
|||
return apply(f, ADT::Ring::mul);
|
||||
}
|
||||
|
||||
static double safe_div(const double& a, const double& b);
|
||||
|
||||
size_t cardinality(Key j) const { return cardinalities_.at(j); }
|
||||
|
||||
/// divide by factor f (safely)
|
||||
DecisionTreeFactor operator/(const DecisionTreeFactor& f) const {
|
||||
return apply(f, safe_div);
|
||||
}
|
||||
|
||||
/// Convert into a DecisionTreeFactor (identity for this class).
|
||||
DecisionTreeFactor toDecisionTreeFactor() const override {
|
||||
return *this;
|
||||
}
|
||||
DecisionTreeFactor toDecisionTreeFactor() const override { return *this; }
|
||||
|
||||
/// Create new factor by summing all values with the same separator values
|
||||
shared_ptr sum(size_t nrFrontals) const {
|
||||
|
@ -109,11 +127,16 @@ namespace gtsam {
|
|||
return combine(keys, ADT::Ring::add);
|
||||
}
|
||||
|
||||
/// Create new factor by maximizing over all values with the same separator values
|
||||
/// Create new factor by maximizing over all values with the same separator.
|
||||
shared_ptr max(size_t nrFrontals) const {
|
||||
return combine(nrFrontals, ADT::Ring::max);
|
||||
}
|
||||
|
||||
/// Create new factor by maximizing over all values with the same separator.
|
||||
shared_ptr max(const Ordering& keys) const {
|
||||
return combine(keys, ADT::Ring::max);
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Interface
|
||||
/// @{
|
||||
|
@ -121,14 +144,14 @@ namespace gtsam {
|
|||
/**
|
||||
* Apply binary operator (*this) "op" f
|
||||
* @param f the second argument for op
|
||||
* @param op a binary operator that operates on AlgebraicDecisionDiagram potentials
|
||||
* @param op a binary operator that operates on AlgebraicDecisionTree
|
||||
*/
|
||||
DecisionTreeFactor apply(const DecisionTreeFactor& f, ADT::Binary op) const;
|
||||
|
||||
/**
|
||||
* Combine frontal variables using binary operator "op"
|
||||
* @param nrFrontals number of frontal variables to combine in this factor
|
||||
* @param op a binary operator that operates on AlgebraicDecisionDiagram potentials
|
||||
* @param op a binary operator that operates on AlgebraicDecisionTree
|
||||
* @return shared pointer to newly created DecisionTreeFactor
|
||||
*/
|
||||
shared_ptr combine(size_t nrFrontals, ADT::Binary op) const;
|
||||
|
@ -136,37 +159,80 @@ namespace gtsam {
|
|||
/**
|
||||
* Combine frontal variables in an Ordering using binary operator "op"
|
||||
* @param keys ordering of the frontal variables to combine in this factor
|
||||
* @param op a binary operator that operates on AlgebraicDecisionDiagram potentials
|
||||
* @param op a binary operator that operates on AlgebraicDecisionTree
|
||||
* @return shared pointer to newly created DecisionTreeFactor
|
||||
*/
|
||||
shared_ptr combine(const Ordering& keys, ADT::Binary op) const;
|
||||
|
||||
/// Enumerate all assignments into a vector of (assignment, value) pairs.
|
||||
std::vector<std::pair<DiscreteValues, double>> enumerate() const;
|
||||
|
||||
// /**
|
||||
// * @brief Permutes the keys in Potentials and DiscreteFactor
|
||||
// *
|
||||
// * This re-implements the permuteWithInverse() in both Potentials
|
||||
// * and DiscreteFactor by doing both of them together.
|
||||
// */
|
||||
//
|
||||
// void permuteWithInverse(const Permutation& inversePermutation){
|
||||
// DiscreteFactor::permuteWithInverse(inversePermutation);
|
||||
// Potentials::permuteWithInverse(inversePermutation);
|
||||
// }
|
||||
//
|
||||
// /**
|
||||
// * Apply a reduction, which is a remapping of variable indices.
|
||||
// */
|
||||
// virtual void reduceWithInverse(const internal::Reduction& inverseReduction) {
|
||||
// DiscreteFactor::reduceWithInverse(inverseReduction);
|
||||
// Potentials::reduceWithInverse(inverseReduction);
|
||||
// }
|
||||
/// Return all the discrete keys associated with this factor.
|
||||
DiscreteKeys discreteKeys() const;
|
||||
|
||||
/**
|
||||
* @brief Prune the decision tree of discrete variables.
|
||||
*
|
||||
* Pruning sets the leaves to be "pruned" to 0, indicating zero
|
||||
* probability. An assignment is pruned if it is not in the top
|
||||
* `maxNrAssignments` values.
|
||||
*
|
||||
* A violation can occur if there are more
|
||||
* duplicate values than `maxNrAssignments`. A violation here means we need to
|
||||
* un-prune the decision tree (e.g. all assignment values are 1.0). We could
|
||||
* have another case where some subset of duplicates exist (e.g. for a tree
|
||||
* with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is
|
||||
* not a violation since, for `maxNrAssignments=5`, the top values are (1,
|
||||
* 0.8).
|
||||
*
|
||||
* @param maxNrAssignments The maximum number of assignments to keep.
|
||||
* @return DecisionTreeFactor
|
||||
*/
|
||||
DecisionTreeFactor prune(size_t maxNrAssignments) const;
|
||||
|
||||
/// @}
|
||||
};
|
||||
// DecisionTreeFactor
|
||||
/// @name Wrapper support
|
||||
/// @{
|
||||
|
||||
/** output to graphviz format, stream version */
|
||||
void dot(std::ostream& os,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
bool showZero = true) const;
|
||||
|
||||
/** output to graphviz format, open a file */
|
||||
void dot(const std::string& name,
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
bool showZero = true) const;
|
||||
|
||||
/** output to graphviz format string */
|
||||
std::string dot(const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
bool showZero = true) const;
|
||||
|
||||
/**
|
||||
* @brief Render as markdown table
|
||||
*
|
||||
* @param keyFormatter GTSAM-style Key formatter.
|
||||
* @param names optional, category names corresponding to choices.
|
||||
* @return std::string a markdown string.
|
||||
*/
|
||||
std::string markdown(const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const Names& names = {}) const override;
|
||||
|
||||
/**
|
||||
* @brief Render as html table
|
||||
*
|
||||
* @param keyFormatter GTSAM-style Key formatter.
|
||||
* @param names optional, category names corresponding to choices.
|
||||
* @return std::string a html string.
|
||||
*/
|
||||
std::string html(const KeyFormatter& keyFormatter = DefaultKeyFormatter,
|
||||
const Names& names = {}) const override;
|
||||
|
||||
/// @}
|
||||
};
|
||||
|
||||
// traits
|
||||
template<> struct traits<DecisionTreeFactor> : public Testable<DecisionTreeFactor> {};
|
||||
template <>
|
||||
struct traits<DecisionTreeFactor> : public Testable<DecisionTreeFactor> {};
|
||||
|
||||
}// namespace gtsam
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -25,51 +25,78 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
// Instantiate base class
|
||||
template class FactorGraph<DiscreteConditional>;
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool DiscreteBayesNet::equals(const This& bn, double tol) const
|
||||
{
|
||||
return Base::equals(bn, tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// void DiscreteBayesNet::add_front(const Signature& s) {
|
||||
// push_front(boost::make_shared<DiscreteConditional>(s));
|
||||
// }
|
||||
|
||||
/* ************************************************************************* */
|
||||
void DiscreteBayesNet::add(const Signature& s) {
|
||||
push_back(boost::make_shared<DiscreteConditional>(s));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double DiscreteBayesNet::evaluate(const DiscreteConditional::Values & values) const {
|
||||
// evaluate all conditionals and multiply
|
||||
double result = 1.0;
|
||||
for(DiscreteConditional::shared_ptr conditional: *this)
|
||||
result *= (*conditional)(values);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
DiscreteFactor::sharedValues DiscreteBayesNet::optimize() const {
|
||||
// solve each node in turn in topological sort order (parents first)
|
||||
DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
|
||||
for (auto conditional: boost::adaptors::reverse(*this))
|
||||
conditional->solveInPlace(*result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
DiscreteFactor::sharedValues DiscreteBayesNet::sample() const {
|
||||
// sample each node in turn in topological sort order (parents first)
|
||||
DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
|
||||
for (auto conditional: boost::adaptors::reverse(*this))
|
||||
conditional->sampleInPlace(*result);
|
||||
return result;
|
||||
}
|
||||
// Instantiate base class
|
||||
template class FactorGraph<DiscreteConditional>;
|
||||
|
||||
/* ************************************************************************* */
|
||||
} // namespace
|
||||
bool DiscreteBayesNet::equals(const This& bn, double tol) const {
|
||||
return Base::equals(bn, tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double DiscreteBayesNet::evaluate(const DiscreteValues& values) const {
|
||||
// evaluate all conditionals and multiply
|
||||
double result = 1.0;
|
||||
for (const DiscreteConditional::shared_ptr& conditional : *this)
|
||||
result *= (*conditional)(values);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
|
||||
DiscreteValues DiscreteBayesNet::optimize() const {
|
||||
DiscreteValues result;
|
||||
return optimize(result);
|
||||
}
|
||||
|
||||
DiscreteValues DiscreteBayesNet::optimize(DiscreteValues result) const {
|
||||
// solve each node in turn in topological sort order (parents first)
|
||||
#ifdef _MSC_VER
|
||||
#pragma message("DiscreteBayesNet::optimize (deprecated) does not compute MPE!")
|
||||
#else
|
||||
#warning "DiscreteBayesNet::optimize (deprecated) does not compute MPE!"
|
||||
#endif
|
||||
for (auto conditional : boost::adaptors::reverse(*this))
|
||||
conditional->solveInPlace(&result);
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
DiscreteValues DiscreteBayesNet::sample() const {
|
||||
DiscreteValues result;
|
||||
return sample(result);
|
||||
}
|
||||
|
||||
DiscreteValues DiscreteBayesNet::sample(DiscreteValues result) const {
|
||||
// sample each node in turn in topological sort order (parents first)
|
||||
for (auto conditional : boost::adaptors::reverse(*this))
|
||||
conditional->sampleInPlace(&result);
|
||||
return result;
|
||||
}
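A hedged end-to-end sketch (the Signature DSL below mirrors standard GTSAM discrete examples, but the keys and ratios are made up):

// Hypothetical: a two-node net P(A) P(B|A), then draw one joint sample.
DiscreteKey A(0, 2), B(1, 2);
DiscreteBayesNet bn;
bn.add(A % "3/1");           // prior on A
bn.add(B | A = "4/1 1/4");   // CPT of B given A
DiscreteValues s = bn.sample();  // e.g. {{0, 0}, {1, 0}}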
|
||||
|
||||
/* *********************************************************************** */
|
||||
std::string DiscreteBayesNet::markdown(
|
||||
const KeyFormatter& keyFormatter,
|
||||
const DiscreteFactor::Names& names) const {
|
||||
using std::endl;
|
||||
std::stringstream ss;
|
||||
ss << "`DiscreteBayesNet` of size " << size() << endl << endl;
|
||||
for (const DiscreteConditional::shared_ptr& conditional : *this)
|
||||
ss << conditional->markdown(keyFormatter, names) << endl;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* *********************************************************************** */
|
||||
std::string DiscreteBayesNet::html(const KeyFormatter& keyFormatter,
|
||||
const DiscreteFactor::Names& names) const {
|
||||
using std::endl;
|
||||
std::stringstream ss;
|
||||
ss << "<div><p><tt>DiscreteBayesNet</tt> of size " << size() << "</p>";
|
||||
for (const DiscreteConditional::shared_ptr& conditional : *this)
|
||||
ss << conditional->html(keyFormatter, names) << endl;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
} // namespace gtsam
|
||||