Merge branch 'develop' into feature/g2o-vertices
commit f4c8e7580c
@@ -34,7 +34,7 @@ cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
 make -j$(nproc) install
-cd $CURRDIR/../gtsam_install/cython
+cd cython
 sudo $PYTHON setup.py install
@@ -10,7 +10,7 @@ endif()
 # Set the version number for the library
 set (GTSAM_VERSION_MAJOR 4)
 set (GTSAM_VERSION_MINOR 0)
-set (GTSAM_VERSION_PATCH 2)
+set (GTSAM_VERSION_PATCH 3)
 math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
 set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
@@ -68,8 +68,8 @@ if(GTSAM_UNSTABLE_AVAILABLE)
 endif()
 option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
 option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
-option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF)
-option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
+option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
+option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
 option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
 option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
 option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
@@ -454,12 +454,11 @@ endif()
 if (GTSAM_INSTALL_CYTHON_TOOLBOX)
   set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
   # Set up cache options
-  set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
-  if(NOT GTSAM_CYTHON_INSTALL_PATH)
-    set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
-  endif()
+  # Cython install path appended with Build type (e.g. cython, cythonDebug, etc).
+  # This does not override custom values set from the command line
+  set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython<GTSAM_BUILD_TAG>")
   set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
-  add_subdirectory(cython)
+  add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH})
 else()
   set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
 endif()
@@ -607,7 +606,7 @@ endif()
 message(STATUS "Cython toolbox flags ")
 print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
 if(GTSAM_INSTALL_CYTHON_TOOLBOX)
   message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
 endif()
 message(STATUS "===============================================================")
@@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp
 However, we now also need to be able to evaluate the derivatives of compose and inverse.
 Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`:

-* `r = traits<T>::Compose(p,q,Hq,Hp)`
+* `r = traits<T>::Compose(p,q,Hp,Hq)`
 * `q = traits<T>::Inverse(p,Hp)`
-* `r = traits<T>::Between(p,q,Hq,H2p)`
+* `r = traits<T>::Between(p,q,Hp,Hq)`

 where above the *H* arguments stand for optional Jacobian arguments.
 That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
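Note: the traits functions listed above are the C++ interface. Through the Python wrapper the same group operations, and the factors built on them, look roughly like the sketch below. This is a minimal sketch, assuming the Cython toolbox is built and importable as `gtsam`; the keys and noise sigmas are made up for illustration.

```python
import numpy as np
import gtsam

p = gtsam.Pose2(1.0, 2.0, np.pi / 4)
q = gtsam.Pose2(4.0, 5.0, np.pi / 2)

r = p.compose(q)    # group composition
d = p.between(q)    # relative transform: inverse(p) * q
inv = p.inverse()   # group inverse

# BetweenFactor relies on Between (and its Jacobians) internally.
noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 0.05]))
factor = gtsam.BetweenFactorPose2(1, 2, d, noise)
```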
@@ -1,5 +1,5 @@
 # version format
-version: 4.0.2-{branch}-build{build}
+version: 4.0.3-{branch}-build{build}

 os: Visual Studio 2019
@@ -1,3 +1,8 @@
+# Set cmake policy to recognize the AppleClang compiler
+# independently from the Clang compiler.
+if(POLICY CMP0025)
+  cmake_policy(SET CMP0025 NEW)
+endif()

 # function: list_append_cache(var [new_values ...])
 # Like "list(APPEND ...)" but working for CACHE variables.
@@ -41,9 +41,8 @@ execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
 function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
   # Paths for generated files
   get_filename_component(module_name "${interface_header}" NAME_WE)
-  set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
+  set(generated_files_path "${install_path}")
   wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
-  install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
 endfunction()

 function(set_up_required_cython_packages)
@@ -138,6 +137,10 @@ function(cythonize target pyx_file output_lib_we output_dir include_dirs libs in
     target_link_libraries(${target} "${libs}")
   endif()
   add_dependencies(${target} ${target}_pyx2cpp)
+
+  if(TARGET ${python_install_target})
+    add_dependencies(${python_install_target} ${target})
+  endif()
 endfunction()

 # Internal function that wraps a library and compiles the wrapper
@@ -150,9 +153,12 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
   get_filename_component(module_name "${interface_header}" NAME_WE)

   # Wrap module to Cython pyx
-  message(STATUS "Cython wrapper generating ${module_name}.pyx")
+  message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx")
   set(generated_pyx "${generated_files_path}/${module_name}.pyx")
-  file(MAKE_DIRECTORY "${generated_files_path}")
+  if(NOT EXISTS ${generated_files_path})
+    file(MAKE_DIRECTORY "${generated_files_path}")
+  endif()

   add_custom_command(
     OUTPUT ${generated_pyx}
     DEPENDS ${interface_header} wrap
@@ -175,46 +181,6 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
     COMMAND cmake -E remove_directory ${generated_files_path})
 endfunction()

-# Internal function that installs a wrap toolbox
-function(install_cython_wrapped_library interface_header generated_files_path install_path)
-  get_filename_component(module_name "${interface_header}" NAME_WE)
-
-  # NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
-  # here prevents creating the top-level module name directory in the destination.
-  # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one
-  get_filename_component(location "${install_path}" PATH)
-  get_filename_component(name "${install_path}" NAME)
-  message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}"
-
-  if(GTSAM_BUILD_TYPE_POSTFIXES)
-    foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
-      string(TOUPPER "${build_type}" build_type_upper)
-      if(${build_type_upper} STREQUAL "RELEASE")
-        set(build_type_tag "") # Don't create release mode tag on installed directory
-      else()
-        set(build_type_tag "${build_type}")
-      endif()
-
-      install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}"
-          CONFIGURATIONS "${build_type}"
-          PATTERN "build" EXCLUDE
-          PATTERN "CMakeFiles" EXCLUDE
-          PATTERN "Makefile" EXCLUDE
-          PATTERN "*.cmake" EXCLUDE
-          PATTERN "*.cpp" EXCLUDE
-          PATTERN "*.py" EXCLUDE)
-    endforeach()
-  else()
-    install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
-        PATTERN "build" EXCLUDE
-        PATTERN "CMakeFiles" EXCLUDE
-        PATTERN "Makefile" EXCLUDE
-        PATTERN "*.cmake" EXCLUDE
-        PATTERN "*.cpp" EXCLUDE
-        PATTERN "*.py" EXCLUDE)
-  endif()
-endfunction()
-
 # Helper function to install Cython scripts and handle multiple build types where the scripts
 # should be installed to all build type toolboxes
 #
|
@ -232,50 +198,7 @@ function(install_cython_scripts source_directory dest_directory patterns)
|
||||||
foreach(pattern ${patterns})
|
foreach(pattern ${patterns})
|
||||||
list(APPEND patterns_args PATTERN "${pattern}")
|
list(APPEND patterns_args PATTERN "${pattern}")
|
||||||
endforeach()
|
endforeach()
|
||||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
|
||||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
file(COPY "${source_directory}" DESTINATION "${dest_directory}"
|
||||||
string(TOUPPER "${build_type}" build_type_upper)
|
|
||||||
if(${build_type_upper} STREQUAL "RELEASE")
|
|
||||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
|
||||||
else()
|
|
||||||
set(build_type_tag "${build_type}")
|
|
||||||
endif()
|
|
||||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
|
||||||
get_filename_component(location "${dest_directory}" PATH)
|
|
||||||
get_filename_component(name "${dest_directory}" NAME)
|
|
||||||
install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
|
|
||||||
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
||||||
endforeach()
|
|
||||||
else()
|
|
||||||
install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
# Helper function to install specific files and handle multiple build types where the scripts
|
|
||||||
# should be installed to all build type toolboxes
|
|
||||||
#
|
|
||||||
# Arguments:
|
|
||||||
# source_files: The source files to be installed.
|
|
||||||
# dest_directory: The destination directory to install to.
|
|
||||||
function(install_cython_files source_files dest_directory)
|
|
||||||
|
|
||||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
|
||||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
|
||||||
string(TOUPPER "${build_type}" build_type_upper)
|
|
||||||
if(${build_type_upper} STREQUAL "RELEASE")
|
|
||||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
|
||||||
else()
|
|
||||||
set(build_type_tag "${build_type}")
|
|
||||||
endif()
|
|
||||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
|
||||||
get_filename_component(location "${dest_directory}" PATH)
|
|
||||||
get_filename_component(name "${dest_directory}" NAME)
|
|
||||||
install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
|
|
||||||
endforeach()
|
|
||||||
else()
|
|
||||||
install(FILES "${source_files}" DESTINATION "${dest_directory}")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
endfunction()
|
|
||||||
|
|
||||||
|
|
|
@@ -3,16 +3,32 @@ include(GtsamCythonWrap)

 # Create the cython toolbox for the gtsam library
 if (GTSAM_INSTALL_CYTHON_TOOLBOX)
+  # Add the new make target command
+  set(python_install_target python-install)
+  add_custom_target(${python_install_target}
+    COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install
+    WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH})
+
   # build and include the eigency version of eigency
   add_subdirectory(gtsam_eigency)
-  include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
+  include_directories(${GTSAM_EIGENCY_INSTALL_PATH})

   # Fix for error "C1128: number of sections exceeded object file format limit"
   if(MSVC)
     add_compile_options(/bigobj)
   endif()

-  # wrap gtsam
+  # First set up all the package related files.
+  # This also ensures the below wrap operations work correctly.
+  set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt")
+
+  # Install the custom-generated __init__.py
+  # This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable
+  configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY)
+  configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY)
+  configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py)
+
+  # Wrap gtsam
   add_custom_target(gtsam_header DEPENDS "../gtsam.h")
   wrap_and_install_library_cython("../gtsam.h" # interface_header
     "" # extra imports
@@ -20,8 +36,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
     gtsam # library to link with
     "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
     )
+  add_dependencies(${python_install_target} gtsam gtsam_header)

-  # wrap gtsam_unstable
+  # Wrap gtsam_unstable
   if(GTSAM_BUILD_UNSTABLE)
     add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
     wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
@@ -30,17 +47,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
       gtsam_unstable # library to link with
       "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
       )
+    add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header)
   endif()

-  file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
-  file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
-
-  # Install the custom-generated __init__.py
-  # This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
-  configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
-  configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
-  configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
-  install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
   # install scripts and tests
   install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
   install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
cython/README.md (134 changes)
@@ -1,43 +1,51 @@
 # Python Wrapper

-This is the Cython/Python wrapper around the GTSAM C++ library.
+This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code.

+## Requirements
+
+- If you want to build the GTSAM python library for a specific python version (eg 3.6),
+  use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used.
+- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
+  then the environment should be active while building GTSAM.
+- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows:
+
+  ```bash
+  pip install -r <gtsam_folder>/cython/requirements.txt
+  ```
+
+- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
+  named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy.
+
 ## Install

-- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
-- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
-- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
-
-  ```bash
-  pip install -r <gtsam_folder>/cython/requirements.txt
-  ```
-
-- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
-  named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
-
-- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
-  The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
-  by default: `<your CMAKE_INSTALL_PREFIX>/cython`
-
-- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
-  ```bash
-  export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
-  ```
-- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
-  - (the same command can be used to install into a virtual environment if it is active)
-  - note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
-  - if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
-    Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
+- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `<PROJECT_BINARY_DIR>/cython` in Release mode and `<PROJECT_BINARY_DIR>/cython<CMAKE_BUILD_TYPE>` for other modes.
+- Build GTSAM and the wrapper with `make`.
+- To install, simply run `make python-install`.
+- The same command can be used to install into a virtual environment if it is active.
+- **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory.
+- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.

 ## Unit Tests

 The Cython toolbox also has a small set of unit tests located in the
 test directory. To run them:

 ```bash
-cd <your GTSAM_CYTHON_INSTALL_PATH>
+cd <GTSAM_CYTHON_INSTALL_PATH>
 python -m unittest discover
 ```

+## Utils
+
+TODO
+
+## Examples
+
+TODO
+
 ## Writing Your Own Scripts
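Note: a quick sanity check after the install steps above can look like the following. This is a minimal sketch; it only assumes the wrapped module is importable, for example after `make python-install` or after adding `GTSAM_CYTHON_INSTALL_PATH` to `PYTHONPATH`.

```python
import gtsam

# Construct a couple of basic types to confirm the bindings load correctly.
pose = gtsam.Pose2(1.0, 2.0, 0.3)
point = gtsam.Point2(3.0, 4.0)
print(pose)
print(point)
```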
@@ -46,79 +54,63 @@ See the tests for examples.
 ### Some Important Notes:

 - Vector/Matrix:
-  + GTSAM expects double-precision floating point vectors and matrices.
-    Hence, you should pass numpy matrices with dtype=float, or 'float64'.
-  + Also, GTSAM expects *column-major* matrices, unlike the default storage
-    scheme in numpy. Hence, you should pass column-major matrices to gtsam using
+  - GTSAM expects double-precision floating point vectors and matrices.
+    Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
+  - Also, GTSAM expects _column-major_ matrices, unlike the default storage
+    scheme in numpy. Hence, you should pass column-major matrices to GTSAM using
     the flag order='F'. And you always get column-major matrices back.
-    For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
-  + Passing row-major matrices of different dtype, e.g. 'int', will also work
+    For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
+  - Passing row-major matrices of different dtype, e.g. `int`, will also work
     as the wrapper converts them to column-major and dtype float for you,
     using numpy.array.astype(float, order='F', copy=False).
     However, this will result a copy if your matrix is not in the expected type
     and storage order.

-- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>_ in Python.
-  Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
+- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>\_ in Python.
+
+  Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`

 - Casting from a base class to a derive class must be done explicitly.
-  Examples:
-  ```Python
+
+  Examples:
+
+  ```python
   noiseBase = factor.noiseModel()
   noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
   ```

-## Wrapping Your Own Project That Uses GTSAM
-
-- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
-  + so that it can find gtsam Cython header: gtsam/gtsam.pxd
-
-- In your CMakeList.txt
-  ```cmake
-  find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
-  set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
-
-  # Wrap
-  include(GtsamCythonWrap)
-  include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
-  wrap_and_install_library_cython("your_project_interface.h"
-    "from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
-    "your_install_path"
-    "libraries_to_link_with_the_cython_module"
-    "dependencies_which_need_to_be_built_before_the_wrapper"
-  )
-  #Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
-  ```
+## Wrapping Custom GTSAM-based Project
+
+Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).

 ## KNOWN ISSUES

 - Doesn't work with python3 installed from homebrew
 - size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
   - Guess: 64 vs 32b? disutils Compiler flags?
 - Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be casted to FastVector<Key>
   - Upgrading to 0.25 solves the problem
 - Need default constructor and default copy constructor for almost every classes... :(
   - support these constructors by default and declare "delete" for special classes?

 ### TODO

 - [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython.
-- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
+- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?)
 - [ ] inner namespaces ==> inner packages?
 - [ ] Wrap fixed-size Matrices/Vectors?

 ### Completed/Cancelled:

-- [x] Fix Python tests: don't use " import <package> * ": Bad style!!! (18-03-17 19:50)
+- [x] Fix Python tests: don't use " import <package> \* ": Bad style!!! (18-03-17 19:50)
 - [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
 - [x] Wrap unstable @done (18-03-17 15:30)
-- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
+- [x] Unify cython/GTSAM.h and the original GTSAM.h @done (18-03-17 15:30)
 - [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
 - [x] 06-03-17: manage to remove the requirements for default and copy constructors
 - [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
-- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however.
+- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `GTSAM::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however.
 - [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
 - [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
 - [x] CMake install script @done (25-11-16 02:30)
@@ -132,7 +124,7 @@ wrap_and_install_library_cython("your_project_interface.h"
 - [x] Casting from parent and grandparents @done (16-11-16 17:00)
 - [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
 - [x] Support "print obj" @done (16-11-16 17:00)
 - [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
 - [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
 - [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
 - [x] [Nice to have] parse typedef @done (16-09-13 17:19)
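Note: to make the Vector/Matrix notes above concrete, a small sketch of passing numpy data to a wrapped call is shown below. It assumes the toolbox is importable as `gtsam`; `noiseModel_Gaussian.Covariance` is used only as one example of an API that accepts a matrix.

```python
import numpy as np
import gtsam

# Column-major, double-precision matrix, as GTSAM expects.
cov = np.array([[0.10, 0.01],
                [0.01, 0.20]], dtype=float, order='F')
model = gtsam.noiseModel_Gaussian.Covariance(cov)

# Per the note above, other dtypes/storage orders are converted for you,
# at the cost of a copy:
converted = np.eye(2, dtype=int).astype(float, order='F', copy=False)
model2 = gtsam.noiseModel_Gaussian.Covariance(converted)
```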
@@ -17,7 +17,8 @@ import unittest
 import gtsam
 from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
                    GaussNewtonParams, LevenbergMarquardtOptimizer,
-                   LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
+                   LevenbergMarquardtParams, PCGSolverParameters,
+                   DummyPreconditionerParameters, NonlinearFactorGraph, Ordering,
                    Point2, PriorFactorPoint2, Values)
 from gtsam.utils.test_case import GtsamTestCase
@@ -61,6 +62,16 @@ class TestScenario(GtsamTestCase):
             fg, initial_values, lmParams).optimize()
         self.assertAlmostEqual(0, fg.error(actual2))

+        # Levenberg-Marquardt
+        lmParams = LevenbergMarquardtParams.CeresDefaults()
+        lmParams.setLinearSolverType("ITERATIVE")
+        cgParams = PCGSolverParameters()
+        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+        lmParams.setIterativeParams(cgParams)
+        actual2 = LevenbergMarquardtOptimizer(
+            fg, initial_values, lmParams).optimize()
+        self.assertAlmostEqual(0, fg.error(actual2))
+
         # Dogleg
         dlParams = DoglegParams()
         dlParams.setOrdering(ordering)
@@ -43,6 +43,11 @@ class TestOptimizeComet(GtsamTestCase):
         self.optimizer = gtsam.GaussNewtonOptimizer(
             graph, initial, self.params)

+        self.lmparams = gtsam.LevenbergMarquardtParams()
+        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
+            graph, initial, self.lmparams
+        )
+
         # setup output capture
         self.capturedOutput = StringIO()
         sys.stdout = self.capturedOutput
@@ -65,6 +70,16 @@ class TestOptimizeComet(GtsamTestCase):
         actual = self.optimizer.values()
         self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)

+    def test_lm_simple_printing(self):
+        """Make sure we are properly terminating LM"""
+        def hook(_, error):
+            print(error)
+
+        gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
+
+        actual = self.lmoptimizer.values()
+        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+
     @unittest.skip("Not a test we want run every time, as needs comet.ml account")
     def test_comet(self):
         """Test with a comet hook."""
@@ -46,5 +46,7 @@ def gtsam_optimize(optimizer,
     def check_convergence(optimizer, current_error, new_error):
         return (optimizer.iterations() >= params.getMaxIterations()) or (
             gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(),
-                                   current_error, new_error))
+                                   current_error, new_error)) or (
+            isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound())
     optimize(optimizer, check_convergence, hook)
+    return optimizer.values()
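Note: a hedged usage sketch for the helper above. It assumes the function is exposed as `gtsam_optimize` in `gtsam.utils.logging_optimizer` and that the hook receives the optimizer and the current error each iteration; the single-prior graph is made up for illustration.

```python
import gtsam
from gtsam.utils.logging_optimizer import gtsam_optimize

# Tiny problem: one prior factor on a 2D point.
graph = gtsam.NonlinearFactorGraph()
noise = gtsam.noiseModel_Unit.Create(2)
graph.add(gtsam.PriorFactorPoint2(1, gtsam.Point2(1.0, 2.0), noise))

initial = gtsam.Values()
initial.insert(1, gtsam.Point2(0.0, 0.0))

def hook(optimizer, error):
    # Log the current total error once per iteration.
    print("error:", error)

params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = gtsam_optimize(optimizer, params, hook)  # returns optimizer.values() after this change
print(result.atPoint2(1))
```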
@@ -4,11 +4,11 @@ include(GtsamCythonWrap)
 # so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
 # and eigency's cython pxd headers can be found when cythonizing gtsam
 file(COPY "." DESTINATION ".")
-set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
+set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
 set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})

 # This is to make the build/cython/gtsam_eigency folder a python package
-configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
+configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)

 # include eigency headers
 include_directories(${EIGENCY_INCLUDE_DIR})
@@ -16,8 +16,8 @@ include_directories(${EIGENCY_INCLUDE_DIR})
 # Cythonize and build eigency
 message(STATUS "Cythonize and build eigency")
 # Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
-# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
-# in conversions_api.h correctly!!!
+# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions()
+# in conversions_api.h correctly!
 cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
   "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
 cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
@@ -37,13 +37,6 @@ add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
 add_custom_target(cythonize_eigency)
 add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)

-# install
-install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
-    DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}"
-    PATTERN "CMakeLists.txt" EXCLUDE
-    PATTERN "__init__.py.in" EXCLUDE)
-install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
-    DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency")
-install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
-configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
-install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
+if(TARGET ${python_install_target})
+  add_dependencies(${python_install_target} cythonize_eigency)
+endif()
@@ -7,6 +7,22 @@ except ImportError:

 packages = find_packages()

+package_data = {
+    package:
+        [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
+    for package in packages
+}
+
+cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines()
+
+install_requires = [line.strip() \
+    for line in cython_install_requirements \
+    if len(line.strip()) > 0 and not line.strip().startswith('#')
+]
+
+# Cleaner to read in the contents rather than copy them over.
+readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read()
+
 setup(
     name='gtsam',
     description='Georgia Tech Smoothing And Mapping library',
@@ -16,7 +32,7 @@ setup(
     author_email='frank.dellaert@gtsam.org',
     license='Simplified BSD license',
     keywords='slam sam robotics localization mapping optimization',
-    long_description='''${README_CONTENTS}''',
+    long_description=readme_contents,
     long_description_content_type='text/markdown',
     python_requires='>=2.7',
     # https://pypi.org/pypi?%3Aaction=list_classifiers
@@ -34,11 +50,6 @@ setup(
     ],

     packages=packages,
-    package_data={package:
-        [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
-        for package in packages
-    },
-    install_requires=[line.strip() for line in '''
-${CYTHON_INSTALL_REQUIREMENTS}
-'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
+    package_data=package_data,
+    install_requires=install_requires
 )
Binary file not shown.

@@ -0,0 +1,21 @@
+# Instructions
+
+Build all docker images, in order:
+
+```bash
+(cd ubuntu-boost-tbb && ./build.sh)
+(cd ubuntu-gtsam && ./build.sh)
+(cd ubuntu-gtsam-python && ./build.sh)
+(cd ubuntu-gtsam-python-vnc && ./build.sh)
+```
+
+Then launch with:
+
+    docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
+
+Then open a remote VNC X client, for example:
+
+    sudo apt-get install tigervnc-viewer
+    xtigervncviewer :5900
@@ -1,18 +0,0 @@
-# Get the base Ubuntu image from Docker Hub
-FROM ubuntu:bionic
-
-# Update apps on the base image
-RUN apt-get -y update && apt-get install -y
-
-# Install C++
-RUN apt-get -y install build-essential
-
-# Install boost and cmake
-RUN apt-get -y install libboost-all-dev cmake
-
-# Install TBB
-RUN apt-get -y install libtbb-dev
-
-# Install latest Eigen
-RUN apt-get install -y libeigen3-dev
@@ -0,0 +1,19 @@
+# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages.
+
+# Get the base Ubuntu image from Docker Hub
+FROM ubuntu:bionic
+
+# Disable GUI prompts
+ENV DEBIAN_FRONTEND noninteractive
+
+# Update apps on the base image
+RUN apt-get -y update && apt-get -y install
+
+# Install C++
+RUN apt-get -y install build-essential apt-utils
+
+# Install boost and cmake
+RUN apt-get -y install libboost-all-dev cmake
+
+# Install TBB
+RUN apt-get -y install libtbb-dev
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
@@ -0,0 +1,20 @@
+# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
+
+# Get the base Ubuntu/GTSAM image from Docker Hub
+FROM dellaert/ubuntu-gtsam-python:bionic
+
+# Things needed to get a python GUI
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt install -y python-tk
+RUN python3 -m pip install matplotlib
+
+# Install a VNC X-server, Frame buffer, and windows manager
+RUN apt install -y x11vnc xvfb fluxbox
+
+# Finally, install wmctrl needed for bootstrap script
+RUN apt install -y wmctrl
+
+# Copy bootstrap script and make sure it runs
+COPY bootstrap.sh /
+
+CMD '/bootstrap.sh'
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb
+
+main() {
+    log_i "Starting xvfb virtual display..."
+    launch_xvfb
+    log_i "Starting window manager..."
+    launch_window_manager
+    log_i "Starting VNC server..."
+    run_vnc_server
+}
+
+launch_xvfb() {
+    local xvfbLockFilePath="/tmp/.X1-lock"
+    if [ -f "${xvfbLockFilePath}" ]
+    then
+        log_i "Removing xvfb lock file '${xvfbLockFilePath}'..."
+        if ! rm -v "${xvfbLockFilePath}"
+        then
+            log_e "Failed to remove xvfb lock file"
+            exit 1
+        fi
+    fi
+
+    # Set defaults if the user did not specify envs.
+    export DISPLAY=${XVFB_DISPLAY:-:1}
+    local screen=${XVFB_SCREEN:-0}
+    local resolution=${XVFB_RESOLUTION:-1280x960x24}
+    local timeout=${XVFB_TIMEOUT:-5}
+
+    # Start and wait for either Xvfb to be fully up or we hit the timeout.
+    Xvfb ${DISPLAY} -screen ${screen} ${resolution} &
+    local loopCount=0
+    until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1
+    do
+        loopCount=$((loopCount+1))
+        sleep 1
+        if [ ${loopCount} -gt ${timeout} ]
+        then
+            log_e "xvfb failed to start"
+            exit 1
+        fi
+    done
+}
+
+launch_window_manager() {
+    local timeout=${XVFB_TIMEOUT:-5}
+
+    # Start and wait for either fluxbox to be fully up or we hit the timeout.
+    fluxbox &
+    local loopCount=0
+    until wmctrl -m > /dev/null 2>&1
+    do
+        loopCount=$((loopCount+1))
+        sleep 1
+        if [ ${loopCount} -gt ${timeout} ]
+        then
+            log_e "fluxbox failed to start"
+            exit 1
+        fi
+    done
+}
+
+run_vnc_server() {
+    local passwordArgument='-nopw'
+
+    if [ -n "${VNC_SERVER_PASSWORD}" ]
+    then
+        local passwordFilePath="${HOME}/.x11vnc.pass"
+        if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}"
+        then
+            log_e "Failed to store x11vnc password"
+            exit 1
+        fi
+        passwordArgument=-"-rfbauth ${passwordFilePath}"
+        log_i "The VNC server will ask for a password"
+    else
+        log_w "The VNC server will NOT ask for a password"
+    fi
+
+    x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} &
+    wait $!
+}
+
+log_i() {
+    log "[INFO] ${@}"
+}
+
+log_w() {
+    log "[WARN] ${@}"
+}
+
+log_e() {
+    log "[ERROR] ${@}"
+}
+
+log() {
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}"
+}
+
+control_c() {
+    echo ""
+    exit
+}
+
+trap control_c SIGINT SIGTERM SIGHUP
+
+main
+
+exit
@@ -0,0 +1,4 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
+docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
@@ -0,0 +1,5 @@
+# After running this script, connect VNC client to 0.0.0.0:5900
+docker run -it \
+    --workdir="/usr/src/gtsam" \
+    -p 5900:5900 \
+    dellaert/ubuntu-gtsam-python-vnc:bionic
@@ -0,0 +1,31 @@
+# GTSAM Ubuntu image with Python wrapper support.
+
+# Get the base Ubuntu/GTSAM image from Docker Hub
+FROM dellaert/ubuntu-gtsam:bionic
+
+# Install pip
+RUN apt-get install -y python3-pip python3-dev
+
+# Install python wrapper requirements
+RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt
+
+# Run cmake again, now with cython toolbox on
+WORKDIR /usr/src/gtsam/build
+RUN cmake \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DGTSAM_WITH_EIGEN_MKL=OFF \
+    -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+    -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
+    -DGTSAM_BUILD_TESTS=OFF \
+    -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
+    -DGTSAM_PYTHON_VERSION=3\
+    ..
+
+# Build again, as ubuntu-gtsam image cleaned
+RUN make -j4 install && make clean
+
+# Needed to run python wrapper:
+RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc
+
+# Run bash
+CMD ["bash"]
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
@@ -0,0 +1,36 @@
+# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
+
+# Get the base Ubuntu image from Docker Hub
+FROM dellaert/ubuntu-boost-tbb:bionic
+
+# Install git
+RUN apt-get update && \
+    apt-get install -y git
+
+# Install compiler
+RUN apt-get install -y build-essential
+
+# Clone GTSAM (develop branch)
+WORKDIR /usr/src/
+RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git
+
+# Change to build directory. Will be created automatically.
+WORKDIR /usr/src/gtsam/build
+# Run cmake
+RUN cmake \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DGTSAM_WITH_EIGEN_MKL=OFF \
+    -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+    -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
+    -DGTSAM_BUILD_TESTS=OFF \
+    -DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \
+    ..
+
+# Build
+RUN make -j4 install && make clean
+
+# Needed to link with GTSAM
+RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:LD_LIBRARY_PATH' >> /root/.bashrc
+
+# Run bash
+CMD ["bash"]
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
@ -0,0 +1,83 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file DiscreteBayesNetExample.cpp
 * @brief Discrete Bayes Net example with famous Asia Bayes Network
 * @author Frank Dellaert
 * @date JULY 10, 2020
 */

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/inference/BayesNet.h>

#include <iomanip>

using namespace std;
using namespace gtsam;

int main(int argc, char **argv) {
  DiscreteBayesNet asia;
  DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
      Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
  asia.add(Asia % "99/1");
  asia.add(Smoking % "50/50");

  asia.add(Tuberculosis | Asia = "99/1 95/5");
  asia.add(LungCancer | Smoking = "99/1 90/10");
  asia.add(Bronchitis | Smoking = "70/30 40/60");

  asia.add((Either | Tuberculosis, LungCancer) = "F T T T");

  asia.add(XRay | Either = "95/5 2/98");
  asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");

  // print
  vector<string> pretty = {"Asia",    "Dyspnea", "XRay",       "Tuberculosis",
                           "Smoking", "Either",  "LungCancer", "Bronchitis"};
  auto formatter = [pretty](Key key) { return pretty[key]; };
  asia.print("Asia", formatter);

  // Convert to factor graph
  DiscreteFactorGraph fg(asia);

  // Create solver and eliminate
  Ordering ordering;
  ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);

  // solve
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  GTSAM_PRINT(*mpe);

  // We can also build a Bayes tree (directed junction tree).
  // The elimination order above will do fine:
  auto bayesTree = fg.eliminateMultifrontal(ordering);
  bayesTree->print("bayesTree", formatter);

  // add evidence, we were in Asia and we have dyspnea
  fg.add(Asia, "0 1");
  fg.add(Dyspnea, "0 1");

  // solve again, now with evidence
  DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
  DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
  GTSAM_PRINT(*mpe2);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
    DiscreteFactor::sharedValues sample = chordal2->sample();
    GTSAM_PRINT(*sample);
  }
  return 0;
}
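The example above includes DiscreteMarginals.h but never queries marginals. A minimal sketch, using the same fg and Smoking variables and assuming it runs before the evidence is added:

  // Sketch (not part of the commit): single-node marginals of the Asia network.
  DiscreteMarginals asiaMarginals(fg);
  Vector pSmoking = asiaMarginals.marginalProbabilities(Smoking);  // 2-vector summed over all other variables
  print(pSmoking, "P(Smoking)");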
@ -10,7 +10,7 @@
|
||||||
* -------------------------------------------------------------------------- */
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file DiscreteBayesNet_graph.cpp
|
* @file DiscreteBayesNet_FG.cpp
|
||||||
* @brief Discrete Bayes Net example using Factor Graphs
|
* @brief Discrete Bayes Net example using Factor Graphs
|
||||||
* @author Abhijit
|
* @author Abhijit
|
||||||
* @date Jun 4, 2012
|
* @date Jun 4, 2012
|
||||||
|
|
|
@ -0,0 +1,94 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file DiscreteBayesNetExample.cpp
 * @brief Hidden Markov Model example, discrete.
 * @author Frank Dellaert
 * @date July 12, 2020
 */

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/inference/BayesNet.h>

#include <iomanip>
#include <sstream>

using namespace std;
using namespace gtsam;

int main(int argc, char **argv) {
  const int nrNodes = 4;
  const size_t nrStates = 3;

  // Define variables as well as ordering
  Ordering ordering;
  vector<DiscreteKey> keys;
  for (int k = 0; k < nrNodes; k++) {
    DiscreteKey key_i(k, nrStates);
    keys.push_back(key_i);
    ordering.emplace_back(k);
  }

  // Create HMM as a DiscreteBayesNet
  DiscreteBayesNet hmm;

  // Define backbone
  const string transition = "8/1/1 1/8/1 1/1/8";
  for (int k = 1; k < nrNodes; k++) {
    hmm.add(keys[k] | keys[k - 1] = transition);
  }

  // Add some measurements, not needed for all time steps!
  hmm.add(keys[0] % "7/2/1");
  hmm.add(keys[1] % "1/9/0");
  hmm.add(keys.back() % "5/4/1");

  // print
  hmm.print("HMM");

  // Convert to factor graph
  DiscreteFactorGraph factorGraph(hmm);

  // Create solver and eliminate
  // This will create a DAG ordered with arrow of time reversed
  DiscreteBayesNet::shared_ptr chordal =
      factorGraph.eliminateSequential(ordering);
  chordal->print("Eliminated");

  // solve
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  GTSAM_PRINT(*mpe);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t k = 0; k < 10; k++) {
    DiscreteFactor::sharedValues sample = chordal->sample();
    GTSAM_PRINT(*sample);
  }

  // Or compute the marginals. This re-eliminates the FG into a Bayes tree
  cout << "\nComputing Node Marginals .." << endl;
  DiscreteMarginals marginals(factorGraph);
  for (int k = 0; k < nrNodes; k++) {
    Vector margProbs = marginals.marginalProbabilities(keys[k]);
    stringstream ss;
    ss << "marginal " << k;
    print(margProbs, ss.str());
  }

  // TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
  // joints efficiently, by the Bayes tree shortcut magic. All the code is there
  // but it's not yet connected.

  return 0;
}
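For readers wiring this HMM into a larger program, a hedged sketch of pulling the decoded states out of the assignment returned above (mpe maps each Key to a state index, since DiscreteFactor::Values is an Assignment, i.e. a std::map from Key to size_t):

  // Sketch only, continuing the example: extract the most-probable state sequence.
  std::vector<size_t> states;
  for (int k = 0; k < nrNodes; k++) {
    states.push_back(mpe->at(k));  // state index of node k under the MPE assignment
  }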
@ -163,7 +163,7 @@ int main(int argc, char* argv[]) {
|
||||||
vector<GpsMeasurement> gps_measurements;
|
vector<GpsMeasurement> gps_measurements;
|
||||||
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||||
|
|
||||||
Vector6 BodyP = (Vector(6) << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||||
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||||
.finished();
|
.finished();
|
||||||
auto body_T_imu = Pose3::Expmap(BodyP);
|
auto body_T_imu = Pose3::Expmap(BodyP);
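The change above swaps a dynamically sized Vector(6) for the fixed-size Vector6 typedef. A small illustrative sketch of the two comma-initializer forms (values are placeholders, not from the KITTI calibration):

// Both build a 6-vector via Eigen's comma initializer; Vector6 is fixed-size,
// so the length is known at compile time and no heap allocation is needed.
Vector dynamic6 = (Vector(6) << 1.0, 2.0, 3.0, 4.0, 5.0, 6.0).finished();
Vector6 static6 = (Vector6() << 1.0, 2.0, 3.0, 4.0, 5.0, 6.0).finished();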
|
||||||
|
@ -173,28 +173,29 @@ int main(int argc, char* argv[]) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure different variables
|
// Configure different variables
|
||||||
double t_offset = gps_measurements[0].time;
|
// double t_offset = gps_measurements[0].time;
|
||||||
size_t first_gps_pose = 1;
|
size_t first_gps_pose = 1;
|
||||||
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||||
double g = 9.8;
|
double g = 9.8;
|
||||||
auto w_coriolis = Vector3(); // zero vector
|
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||||
|
|
||||||
// Configure noise models
|
// Configure noise models
|
||||||
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0),
|
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
Vector3::Constant(1.0/0.07))
|
Vector3::Constant(1.0/0.07))
|
||||||
.finished());
|
.finished());
|
||||||
|
|
||||||
// Set initial conditions for the estimated trajectory
|
// Set initial conditions for the estimated trajectory
|
||||||
// initial pose is the reference frame (navigation frame)
|
// initial pose is the reference frame (navigation frame)
|
||||||
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||||
auto current_velocity_global = Vector3(); // the vehicle is stationary at the beginning at position 0,0,0
|
// the vehicle is stationary at the beginning at position 0,0,0
|
||||||
|
Vector3 current_velocity_global = Vector3::Zero();
|
||||||
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||||
|
|
||||||
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0),
|
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
Vector3::Constant(1.0))
|
Vector3::Constant(1.0))
|
||||||
.finished());
|
.finished());
|
||||||
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(0.100),
|
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
|
||||||
Vector3::Constant(5.00e-05))
|
Vector3::Constant(5.00e-05))
|
||||||
.finished());
|
.finished());
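A short sketch of the difference between the two diagonal noise-model factories used here (sigma values are arbitrary): Precisions takes inverse variances, so a zero precision leaves that dimension unconstrained, which is exactly how the rotation part of the GPS noise model above is switched off.

// Sketch: two ways to specify the same diagonal noise model.
auto fromSigmas = noiseModel::Diagonal::Sigmas(Vector3::Constant(0.1));
auto fromPrecisions = noiseModel::Diagonal::Precisions(Vector3::Constant(100.0));
// 1 / 0.1^2 == 100, so both models weight residuals identically.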
|
||||||
|
|
||||||
|
@ -270,7 +271,7 @@ int main(int argc, char* argv[]) {
|
||||||
previous_bias_key, *current_summarized_measurement);
|
previous_bias_key, *current_summarized_measurement);
|
||||||
|
|
||||||
// Bias evolution as given in the IMU metadata
|
// Bias evolution as given in the IMU metadata
|
||||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector(6) <<
|
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
|
||||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||||
.finished());
|
.finished());
|
||||||
|
@ -342,6 +343,11 @@ int main(int argc, char* argv[]) {
|
||||||
auto pose_quat = pose.rotation().toQuaternion();
|
auto pose_quat = pose.rotation().toQuaternion();
|
||||||
auto gps = gps_measurements[i].position;
|
auto gps = gps_measurements[i].position;
|
||||||
|
|
||||||
|
cout << "State at #" << i << endl;
|
||||||
|
cout << "Pose:" << endl << pose << endl;
|
||||||
|
cout << "Velocity:" << endl << velocity << endl;
|
||||||
|
cout << "Bias:" << endl << bias << endl;
|
||||||
|
|
||||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||||
gps_measurements[i].time,
|
gps_measurements[i].time,
|
||||||
pose.x(), pose.y(), pose.z(),
|
pose.x(), pose.y(), pose.z(),
|
||||||
|
|
|
@ -35,22 +35,28 @@
|
||||||
* optional arguments:
|
* optional arguments:
|
||||||
* data_csv_path path to the CSV file with the IMU data.
|
* data_csv_path path to the CSV file with the IMU data.
|
||||||
* -c use CombinedImuFactor
|
* -c use CombinedImuFactor
|
||||||
|
* Note: Define USE_LM to use Levenberg Marquardt Optimizer
|
||||||
|
* By default ISAM2 is used
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// GTSAM related includes.
|
// GTSAM related includes.
|
||||||
#include <gtsam/inference/Symbol.h>
|
|
||||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
#include <gtsam/navigation/GPSFactor.h>
|
#include <gtsam/navigation/GPSFactor.h>
|
||||||
#include <gtsam/navigation/ImuFactor.h>
|
#include <gtsam/navigation/ImuFactor.h>
|
||||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
|
||||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
|
||||||
#include <gtsam/slam/BetweenFactor.h>
|
#include <gtsam/slam/BetweenFactor.h>
|
||||||
#include <gtsam/slam/dataset.h>
|
#include <gtsam/slam/dataset.h>
|
||||||
|
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||||
|
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2.h>
|
||||||
|
#include <gtsam/inference/Symbol.h>
|
||||||
|
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
|
// Uncomment the following to use Levenberg Marquardt Optimizer
|
||||||
|
// #define USE_LM
|
||||||
|
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
@ -64,6 +70,17 @@ static const char use_combined_imu_flag[3] = "-c";
|
||||||
int main(int argc, char* argv[]) {
|
int main(int argc, char* argv[]) {
|
||||||
string data_filename;
|
string data_filename;
|
||||||
bool use_combined_imu = false;
|
bool use_combined_imu = false;
|
||||||
|
|
||||||
|
#ifndef USE_LM
|
||||||
|
printf("Using ISAM2\n");
|
||||||
|
ISAM2Params parameters;
|
||||||
|
parameters.relinearizeThreshold = 0.01;
|
||||||
|
parameters.relinearizeSkip = 1;
|
||||||
|
ISAM2 isam2(parameters);
|
||||||
|
#else
|
||||||
|
printf("Using Levenberg Marquardt Optimizer\n");
|
||||||
|
#endif
|
||||||
|
|
||||||
if (argc < 2) {
|
if (argc < 2) {
|
||||||
printf("using default CSV file\n");
|
printf("using default CSV file\n");
|
||||||
data_filename = findExampleDataFile("imuAndGPSdata.csv");
|
data_filename = findExampleDataFile("imuAndGPSdata.csv");
|
||||||
|
@ -252,9 +269,19 @@ int main(int argc, char* argv[]) {
|
||||||
initial_values.insert(V(correction_count), prop_state.v());
|
initial_values.insert(V(correction_count), prop_state.v());
|
||||||
initial_values.insert(B(correction_count), prev_bias);
|
initial_values.insert(B(correction_count), prev_bias);
|
||||||
|
|
||||||
|
Values result;
|
||||||
|
#ifdef USE_LM
|
||||||
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
|
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
|
||||||
Values result = optimizer.optimize();
|
result = optimizer.optimize();
|
||||||
|
#else
|
||||||
|
isam2.update(*graph, initial_values);
|
||||||
|
isam2.update();
|
||||||
|
result = isam2.calculateEstimate();
|
||||||
|
|
||||||
|
// reset the graph
|
||||||
|
graph->resize(0);
|
||||||
|
initial_values.clear();
|
||||||
|
#endif
|
||||||
// Overwrite the beginning of the preintegration for the next step.
|
// Overwrite the beginning of the preintegration for the next step.
|
||||||
prev_state = NavState(result.at<Pose3>(X(correction_count)),
|
prev_state = NavState(result.at<Pose3>(X(correction_count)),
|
||||||
result.at<Vector3>(V(correction_count)));
|
result.at<Vector3>(V(correction_count)));
|
||||||
|
|
|
@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
|
||||||
Symbol('x', i), corrupted_pose);
|
Symbol('x', i), corrupted_pose);
|
||||||
}
|
}
|
||||||
for (size_t j = 0; j < points.size(); ++j) {
|
for (size_t j = 0; j < points.size(); ++j) {
|
||||||
auto corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
|
Point3 corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
|
||||||
initialEstimate.insert<Point3>(Symbol('l', j), corrupted_point);
|
initialEstimate.insert<Point3>(Symbol('l', j), corrupted_point);
|
||||||
}
|
}
|
||||||
initialEstimate.print("Initial Estimates:\n");
|
initialEstimate.print("Initial Estimates:\n");
|
||||||
|
|
34
gtsam.h
|
@ -1459,7 +1459,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1470,7 +1470,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1481,7 +1481,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
|
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1492,7 +1492,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
|
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1503,7 +1503,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
|
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1514,7 +1514,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
|
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1525,7 +1525,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
|
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1536,7 +1536,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
|
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
|
||||||
|
@ -1547,7 +1547,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
|
||||||
void serializable() const;
|
void serializable() const;
|
||||||
|
|
||||||
double weight(double error) const;
|
double weight(double error) const;
|
||||||
double residual(double error) const;
|
double loss(double error) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
}///\namespace mEstimator
|
}///\namespace mEstimator
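These wrapper updates rename residual to loss on every m-estimator. A brief C++ sketch of how the renamed method is typically exercised alongside a robust noise model (the threshold 1.345 and sigma 0.1 are arbitrary choices for illustration):

// Sketch: Huber m-estimator wrapped into a robust noise model.
auto huber = noiseModel::mEstimator::Huber::Create(1.345);
double w = huber->weight(2.0);   // down-weighting factor for a 2.0 error
double rho = huber->loss(2.0);   // renamed from residual() in this change
auto robust = noiseModel::Robust::Create(huber, noiseModel::Isotropic::Sigma(3, 0.1));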
|
||||||
|
@ -1938,6 +1938,22 @@ virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParamete
|
||||||
void print() const;
|
void print() const;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#include <gtsam/linear/Preconditioner.h>
|
||||||
|
virtual class PreconditionerParameters {
|
||||||
|
PreconditionerParameters();
|
||||||
|
};
|
||||||
|
|
||||||
|
virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters {
|
||||||
|
DummyPreconditionerParameters();
|
||||||
|
};
|
||||||
|
|
||||||
|
#include <gtsam/linear/PCGSolver.h>
|
||||||
|
virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
|
||||||
|
PCGSolverParameters();
|
||||||
|
void print(string s);
|
||||||
|
void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
|
||||||
|
};
|
||||||
|
|
||||||
#include <gtsam/linear/SubgraphSolver.h>
|
#include <gtsam/linear/SubgraphSolver.h>
|
||||||
virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
|
virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
|
||||||
SubgraphSolverParameters();
|
SubgraphSolverParameters();
|
||||||
|
|
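The new wrapper entries expose PCGSolverParameters and the dummy preconditioner. A rough C++ sketch of configuring them, assuming the C++ signatures mirror the wrapper declarations above (the shared_ptr argument to setPreconditionerParams is an assumption, and hooking the result into an optimizer via NonlinearOptimizerParams::iterativeParams is assumed rather than shown):

// Rough sketch, not taken from the commit: a PCG solver configuration with the
// do-nothing preconditioner declared above.
auto pcg = boost::make_shared<PCGSolverParameters>();
pcg->setPreconditionerParams(boost::make_shared<DummyPreconditionerParameters>());
pcg->print("pcg solver parameters");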
|
@ -15,7 +15,7 @@ set (gtsam_subdirs
|
||||||
sfm
|
sfm
|
||||||
slam
|
slam
|
||||||
smart
|
smart
|
||||||
navigation
|
navigation
|
||||||
)
|
)
|
||||||
|
|
||||||
set(gtsam_srcs)
|
set(gtsam_srcs)
|
||||||
|
@ -186,11 +186,17 @@ install(
|
||||||
list(APPEND GTSAM_EXPORTED_TARGETS gtsam)
|
list(APPEND GTSAM_EXPORTED_TARGETS gtsam)
|
||||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
||||||
|
|
||||||
# make sure that ccolamd compiles even in face of warnings
|
# Make sure that ccolamd compiles even in face of warnings
|
||||||
|
# and suppress all warnings from 3rd party code if Release build
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "/w")
|
||||||
else()
|
else()
|
||||||
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
|
||||||
|
# Suppress all warnings from 3rd party sources.
|
||||||
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
|
||||||
|
else()
|
||||||
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Create the matlab toolbox for the gtsam library
|
# Create the matlab toolbox for the gtsam library
|
||||||
|
|
|
@ -548,17 +548,47 @@ GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
|
||||||
namespace boost {
|
namespace boost {
|
||||||
namespace serialization {
|
namespace serialization {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
|
||||||
|
*
|
||||||
|
* Eigen supports calling resize() on both static and dynamic matrices.
|
||||||
|
* This allows for a uniform API, with resize having no effect if the static matrix
|
||||||
|
* is already the correct size.
|
||||||
|
* https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
|
||||||
|
*
|
||||||
|
* We use all the Matrix template parameters to ensure wide compatibility.
|
||||||
|
*
|
||||||
|
* eigen_typekit in ROS uses the same code
|
||||||
|
* http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
|
||||||
|
*/
|
||||||
|
|
||||||
// split version - sends sizes ahead
|
// split version - sends sizes ahead
|
||||||
template<class Archive>
|
template<class Archive,
|
||||||
void save(Archive & ar, const gtsam::Matrix & m, unsigned int /*version*/) {
|
typename Scalar_,
|
||||||
|
int Rows_,
|
||||||
|
int Cols_,
|
||||||
|
int Ops_,
|
||||||
|
int MaxRows_,
|
||||||
|
int MaxCols_>
|
||||||
|
void save(Archive & ar,
|
||||||
|
const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||||
|
const unsigned int /*version*/) {
|
||||||
const size_t rows = m.rows(), cols = m.cols();
|
const size_t rows = m.rows(), cols = m.cols();
|
||||||
ar << BOOST_SERIALIZATION_NVP(rows);
|
ar << BOOST_SERIALIZATION_NVP(rows);
|
||||||
ar << BOOST_SERIALIZATION_NVP(cols);
|
ar << BOOST_SERIALIZATION_NVP(cols);
|
||||||
ar << make_nvp("data", make_array(m.data(), m.size()));
|
ar << make_nvp("data", make_array(m.data(), m.size()));
|
||||||
}
|
}
|
||||||
|
|
||||||
template<class Archive>
|
template<class Archive,
|
||||||
void load(Archive & ar, gtsam::Matrix & m, unsigned int /*version*/) {
|
typename Scalar_,
|
||||||
|
int Rows_,
|
||||||
|
int Cols_,
|
||||||
|
int Ops_,
|
||||||
|
int MaxRows_,
|
||||||
|
int MaxCols_>
|
||||||
|
void load(Archive & ar,
|
||||||
|
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||||
|
const unsigned int /*version*/) {
|
||||||
size_t rows, cols;
|
size_t rows, cols;
|
||||||
ar >> BOOST_SERIALIZATION_NVP(rows);
|
ar >> BOOST_SERIALIZATION_NVP(rows);
|
||||||
ar >> BOOST_SERIALIZATION_NVP(cols);
|
ar >> BOOST_SERIALIZATION_NVP(cols);
|
||||||
|
@ -566,8 +596,25 @@ namespace boost {
|
||||||
ar >> make_nvp("data", make_array(m.data(), m.size()));
|
ar >> make_nvp("data", make_array(m.data(), m.size()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
|
||||||
|
template<class Archive,
|
||||||
|
typename Scalar_,
|
||||||
|
int Rows_,
|
||||||
|
int Cols_,
|
||||||
|
int Ops_,
|
||||||
|
int MaxRows_,
|
||||||
|
int MaxCols_>
|
||||||
|
void serialize(Archive & ar,
|
||||||
|
Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
|
||||||
|
const unsigned int version) {
|
||||||
|
split_free(ar, m, version);
|
||||||
|
}
|
||||||
|
|
||||||
|
// specialized to Matrix for MATLAB wrapper
|
||||||
|
template <class Archive>
|
||||||
|
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
|
||||||
|
split_free(ar, m, version);
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace serialization
|
} // namespace serialization
|
||||||
} // namespace boost
|
} // namespace boost
|
||||||
|
|
||||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Matrix);
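With save/load now templated on all Eigen::Matrix parameters, any fixed- or dynamic-size GTSAM matrix serializes through the usual Boost archives. A minimal sketch under that assumption (archive type and matrix contents are arbitrary):

#include <gtsam/base/Matrix.h>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <sstream>

void roundTripExample() {
  std::stringstream ss;
  gtsam::Matrix m = gtsam::Matrix::Identity(3, 3);
  {
    boost::archive::text_oarchive oa(ss);
    oa << m;  // the templated save() above writes rows, cols, then the data array
  }
  gtsam::Matrix m2;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> m2;  // load() resizes (a no-op for fixed-size types) and reads the data back
  }
}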
|
|
||||||
|
|
||||||
|
|
|
@ -20,13 +20,14 @@
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <map>
|
#include <map>
|
||||||
#include <boost/shared_ptr.hpp>
|
#include <boost/shared_ptr.hpp>
|
||||||
|
#include <gtsam/inference/BayesNet.h>
|
||||||
#include <gtsam/inference/FactorGraph.h>
|
#include <gtsam/inference/FactorGraph.h>
|
||||||
#include <gtsam/discrete/DiscreteConditional.h>
|
#include <gtsam/discrete/DiscreteConditional.h>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/** A Bayes net made from linear-Discrete densities */
|
/** A Bayes net made from linear-Discrete densities */
|
||||||
class GTSAM_EXPORT DiscreteBayesNet: public FactorGraph<DiscreteConditional>
|
class GTSAM_EXPORT DiscreteBayesNet: public BayesNet<DiscreteConditional>
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
|
||||||
|
|
|
@ -29,13 +29,32 @@ namespace gtsam {
|
||||||
template class BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>;
|
template class BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>;
|
||||||
template class BayesTree<DiscreteBayesTreeClique>;
|
template class BayesTree<DiscreteBayesTreeClique>;
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
double DiscreteBayesTreeClique::evaluate(
|
||||||
|
const DiscreteConditional::Values& values) const {
|
||||||
|
// evaluate all conditionals and multiply
|
||||||
|
double result = (*conditional_)(values);
|
||||||
|
for (const auto& child : children) {
|
||||||
|
result *= child->evaluate(values);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
bool DiscreteBayesTree::equals(const This& other, double tol) const
|
bool DiscreteBayesTree::equals(const This& other, double tol) const {
|
||||||
{
|
|
||||||
return Base::equals(other, tol);
|
return Base::equals(other, tol);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
double DiscreteBayesTree::evaluate(
|
||||||
|
const DiscreteConditional::Values& values) const {
|
||||||
|
double result = 1.0;
|
||||||
|
for (const auto& root : roots_) {
|
||||||
|
result *= root->evaluate(values);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
} // \namespace gtsam
|
} // \namespace gtsam
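The new evaluate methods multiply clique conditionals down the tree. A hedged sketch of calling them from client code, reusing fg, ordering, and mpe from the Asia example earlier; it assumes DiscreteFactor::Values is interchangeable with DiscreteConditional::Values, since both are Assignment<Key>:

  // Sketch: probability of the MPE assignment, read off the Bayes tree.
  auto bayesTree = fg.eliminateMultifrontal(ordering);
  double pMPE = bayesTree->evaluate(*mpe);  // product over all clique conditionals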
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,8 @@
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file DiscreteBayesTree.h
|
* @file DiscreteBayesTree.h
|
||||||
* @brief Discrete Bayes Tree, the result of eliminating a DiscreteJunctionTree
|
* @brief Discrete Bayes Tree, the result of eliminating a
|
||||||
|
* DiscreteJunctionTree
|
||||||
* @brief DiscreteBayesTree
|
* @brief DiscreteBayesTree
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
* @author Richard Roberts
|
* @author Richard Roberts
|
||||||
|
@ -22,45 +23,62 @@
|
||||||
#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/inference/BayesTree.h>
|
#include <gtsam/inference/BayesTree.h>
|
||||||
|
#include <gtsam/inference/Conditional.h>
|
||||||
#include <gtsam/inference/BayesTreeCliqueBase.h>
|
#include <gtsam/inference/BayesTreeCliqueBase.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
// Forward declarations
|
// Forward declarations
|
||||||
class DiscreteConditional;
|
class DiscreteConditional;
|
||||||
class VectorValues;
|
class VectorValues;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
/** A clique in a DiscreteBayesTree */
|
/** A clique in a DiscreteBayesTree */
|
||||||
class GTSAM_EXPORT DiscreteBayesTreeClique :
|
class GTSAM_EXPORT DiscreteBayesTreeClique
|
||||||
public BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>
|
: public BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph> {
|
||||||
{
|
public:
|
||||||
public:
|
typedef DiscreteBayesTreeClique This;
|
||||||
typedef DiscreteBayesTreeClique This;
|
typedef BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>
|
||||||
typedef BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph> Base;
|
Base;
|
||||||
typedef boost::shared_ptr<This> shared_ptr;
|
typedef boost::shared_ptr<This> shared_ptr;
|
||||||
typedef boost::weak_ptr<This> weak_ptr;
|
typedef boost::weak_ptr<This> weak_ptr;
|
||||||
DiscreteBayesTreeClique() {}
|
DiscreteBayesTreeClique() {}
|
||||||
DiscreteBayesTreeClique(const boost::shared_ptr<DiscreteConditional>& conditional) : Base(conditional) {}
|
DiscreteBayesTreeClique(
|
||||||
};
|
const boost::shared_ptr<DiscreteConditional>& conditional)
|
||||||
|
: Base(conditional) {}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/// print index signature only
|
||||||
/** A Bayes tree representing a Discrete density */
|
void printSignature(
|
||||||
class GTSAM_EXPORT DiscreteBayesTree :
|
const std::string& s = "Clique: ",
|
||||||
public BayesTree<DiscreteBayesTreeClique>
|
const KeyFormatter& formatter = DefaultKeyFormatter) const {
|
||||||
{
|
conditional_->printSignature(s, formatter);
|
||||||
private:
|
}
|
||||||
typedef BayesTree<DiscreteBayesTreeClique> Base;
|
|
||||||
|
|
||||||
public:
|
/** evaluate conditional probability of subtree for given Values */
|
||||||
typedef DiscreteBayesTree This;
|
double evaluate(const DiscreteConditional::Values& values) const;
|
||||||
typedef boost::shared_ptr<This> shared_ptr;
|
};
|
||||||
|
|
||||||
/** Default constructor, creates an empty Bayes tree */
|
/* ************************************************************************* */
|
||||||
DiscreteBayesTree() {}
|
/** A Bayes tree representing a Discrete density */
|
||||||
|
class GTSAM_EXPORT DiscreteBayesTree
|
||||||
|
: public BayesTree<DiscreteBayesTreeClique> {
|
||||||
|
private:
|
||||||
|
typedef BayesTree<DiscreteBayesTreeClique> Base;
|
||||||
|
|
||||||
/** Check equality */
|
public:
|
||||||
bool equals(const This& other, double tol = 1e-9) const;
|
typedef DiscreteBayesTree This;
|
||||||
};
|
typedef boost::shared_ptr<This> shared_ptr;
|
||||||
|
|
||||||
}
|
/** Default constructor, creates an empty Bayes tree */
|
||||||
|
DiscreteBayesTree() {}
|
||||||
|
|
||||||
|
/** Check equality */
|
||||||
|
bool equals(const This& other, double tol = 1e-9) const;
|
||||||
|
|
||||||
|
/** evaluate probability for given Values */
|
||||||
|
double evaluate(const DiscreteConditional::Values& values) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace gtsam
|
||||||
|
|
|
@ -27,6 +27,7 @@
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <random>
|
#include <random>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
@ -61,16 +62,26 @@ DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
DiscreteConditional::DiscreteConditional(const Signature& signature) :
|
DiscreteConditional::DiscreteConditional(const Signature& signature)
|
||||||
BaseFactor(signature.discreteKeysParentsFirst(), signature.cpt()), BaseConditional(
|
: BaseFactor(signature.discreteKeys(), signature.cpt()),
|
||||||
1) {
|
BaseConditional(1) {}
|
||||||
}
|
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
void DiscreteConditional::print(const std::string& s,
|
void DiscreteConditional::print(const string& s,
|
||||||
const KeyFormatter& formatter) const {
|
const KeyFormatter& formatter) const {
|
||||||
std::cout << s << std::endl;
|
cout << s << " P( ";
|
||||||
Potentials::print(s);
|
for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) {
|
||||||
|
cout << formatter(*it) << " ";
|
||||||
|
}
|
||||||
|
if (nrParents()) {
|
||||||
|
cout << "| ";
|
||||||
|
for (const_iterator it = beginParents(); it != endParents(); ++it) {
|
||||||
|
cout << formatter(*it) << " ";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cout << ")";
|
||||||
|
Potentials::print("");
|
||||||
|
cout << endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
|
@ -173,55 +184,28 @@ size_t DiscreteConditional::solve(const Values& parentsValues) const {
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
size_t DiscreteConditional::sample(const Values& parentsValues) const {
|
size_t DiscreteConditional::sample(const Values& parentsValues) const {
|
||||||
|
static mt19937 rng(2); // random number generator
|
||||||
static mt19937 rng(2); // random number generator
|
|
||||||
|
|
||||||
bool debug = ISDEBUG("DiscreteConditional::sample");
|
|
||||||
|
|
||||||
// Get the correct conditional density
|
// Get the correct conditional density
|
||||||
ADT pFS = choose(parentsValues); // P(F|S=parentsValues)
|
ADT pFS = choose(parentsValues); // P(F|S=parentsValues)
|
||||||
if (debug)
|
|
||||||
GTSAM_PRINT(pFS);
|
|
||||||
|
|
||||||
// get cumulative distribution function (cdf)
|
// TODO(Duy): only works for one key now, seems horribly slow this way
|
||||||
// TODO, only works for one key now, seems horribly slow this way
|
|
||||||
assert(nrFrontals() == 1);
|
assert(nrFrontals() == 1);
|
||||||
Key j = (firstFrontalKey());
|
Key key = firstFrontalKey();
|
||||||
size_t nj = cardinality(j);
|
size_t nj = cardinality(key);
|
||||||
vector<double> cdf(nj);
|
vector<double> p(nj);
|
||||||
Values frontals;
|
Values frontals;
|
||||||
double sum = 0;
|
|
||||||
for (size_t value = 0; value < nj; value++) {
|
for (size_t value = 0; value < nj; value++) {
|
||||||
frontals[j] = value;
|
frontals[key] = value;
|
||||||
double pValueS = pFS(frontals); // P(F=value|S=parentsValues)
|
p[value] = pFS(frontals); // P(F=value|S=parentsValues)
|
||||||
sum += pValueS; // accumulate
|
if (p[value] == 1.0) {
|
||||||
if (debug)
|
return value; // shortcut exit
|
||||||
cout << sum << " ";
|
|
||||||
if (pValueS == 1) {
|
|
||||||
if (debug)
|
|
||||||
cout << "--> " << value << endl;
|
|
||||||
return value; // shortcut exit
|
|
||||||
}
|
}
|
||||||
cdf[value] = sum;
|
|
||||||
}
|
}
|
||||||
|
std::discrete_distribution<size_t> distribution(p.begin(), p.end());
|
||||||
// inspired by http://www.boost.org/doc/libs/1_46_1/doc/html/boost_random/tutorial.html
|
return distribution(rng);
|
||||||
uniform_real_distribution<double> dist(0, cdf.back());
|
|
||||||
size_t sampled = lower_bound(cdf.begin(), cdf.end(), dist(rng)) - cdf.begin();
|
|
||||||
if (debug)
|
|
||||||
cout << "-> " << sampled << endl;
|
|
||||||
|
|
||||||
return sampled;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
|
||||||
//void DiscreteConditional::permuteWithInverse(
|
|
||||||
// const Permutation& inversePermutation) {
|
|
||||||
// IndexConditionalOrdered::permuteWithInverse(inversePermutation);
|
|
||||||
// Potentials::permuteWithInverse(inversePermutation);
|
|
||||||
//}
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
|
|
||||||
}// namespace
|
}// namespace
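The rewritten sample() hands the per-state probabilities straight to std::discrete_distribution instead of building a CDF by hand. A self-contained sketch of that standard-library pattern (the weights here are arbitrary):

#include <random>
#include <vector>

size_t sampleOnce() {
  static std::mt19937 rng(2);                        // fixed seed, as in the code above
  std::vector<double> p = {0.8, 0.1, 0.1};           // weights need not be normalized
  std::discrete_distribution<size_t> dist(p.begin(), p.end());
  return dist(rng);                                  // index i drawn with probability proportional to p[i]
}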
|
||||||
|
|
|
@ -24,6 +24,8 @@
|
||||||
#include <boost/shared_ptr.hpp>
|
#include <boost/shared_ptr.hpp>
|
||||||
#include <boost/make_shared.hpp>
|
#include <boost/make_shared.hpp>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -92,6 +94,13 @@ public:
|
||||||
/// @name Standard Interface
|
/// @name Standard Interface
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
|
/// print index signature only
|
||||||
|
void printSignature(
|
||||||
|
const std::string& s = "Discrete Conditional: ",
|
||||||
|
const KeyFormatter& formatter = DefaultKeyFormatter) const {
|
||||||
|
static_cast<const BaseConditional*>(this)->print(s, formatter);
|
||||||
|
}
|
||||||
|
|
||||||
/// Evaluate, just look up in AlgebraicDecisonTree
|
/// Evaluate, just look up in AlgebraicDecisonTree
|
||||||
virtual double operator()(const Values& values) const {
|
virtual double operator()(const Values& values) const {
|
||||||
return Potentials::operator()(values);
|
return Potentials::operator()(values);
|
||||||
|
|
|
@ -15,50 +15,52 @@
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <gtsam/discrete/Potentials.h>
|
|
||||||
#include <gtsam/discrete/DecisionTree-inl.h>
|
#include <gtsam/discrete/DecisionTree-inl.h>
|
||||||
|
#include <gtsam/discrete/Potentials.h>
|
||||||
|
|
||||||
#include <boost/format.hpp>
|
#include <boost/format.hpp>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
// explicit instantiation
|
// explicit instantiation
|
||||||
template class DecisionTree<Key, double> ;
|
template class DecisionTree<Key, double>;
|
||||||
template class AlgebraicDecisionTree<Key> ;
|
template class AlgebraicDecisionTree<Key>;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Potentials::safe_div(const double& a, const double& b) {
|
double Potentials::safe_div(const double& a, const double& b) {
|
||||||
// cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
|
// cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
|
||||||
// The use for safe_div is when we divide the product factor by the sum factor.
|
// The use for safe_div is when we divide the product factor by the sum
|
||||||
// If the product or sum is zero, we accord zero probability to the event.
|
// factor. If the product or sum is zero, we accord zero probability to the
|
||||||
return (a == 0 || b == 0) ? 0 : (a / b);
|
// event.
|
||||||
}
|
return (a == 0 || b == 0) ? 0 : (a / b);
|
||||||
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ********************************************************************************
|
||||||
Potentials::Potentials() :
|
*/
|
||||||
ADT(1.0) {
|
Potentials::Potentials() : ADT(1.0) {}
|
||||||
}
|
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ********************************************************************************
|
||||||
Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree) :
|
*/
|
||||||
ADT(decisionTree), cardinalities_(keys.cardinalities()) {
|
Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree)
|
||||||
}
|
: ADT(decisionTree), cardinalities_(keys.cardinalities()) {}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
bool Potentials::equals(const Potentials& other, double tol) const {
|
bool Potentials::equals(const Potentials& other, double tol) const {
|
||||||
return ADT::equals(other, tol);
|
return ADT::equals(other, tol);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
void Potentials::print(const string& s,
|
void Potentials::print(const string& s, const KeyFormatter& formatter) const {
|
||||||
const KeyFormatter& formatter) const {
|
cout << s << "\n Cardinalities: {";
|
||||||
cout << s << "\n Cardinalities: ";
|
for (const DiscreteKey& key : cardinalities_)
|
||||||
for(const DiscreteKey& key: cardinalities_)
|
cout << formatter(key.first) << ":" << key.second << ", ";
|
||||||
cout << formatter(key.first) << "=" << formatter(key.second) << " ";
|
cout << "}" << endl;
|
||||||
cout << endl;
|
ADT::print(" ");
|
||||||
ADT::print(" ");
|
}
|
||||||
}
|
|
||||||
//
|
//
|
||||||
// /* ************************************************************************* */
|
// /* ************************************************************************* */
|
||||||
// template<class P>
|
// template<class P>
|
||||||
|
@ -95,4 +97,4 @@ namespace gtsam {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
} // namespace gtsam
|
} // namespace gtsam
|
||||||
|
|
|
@ -122,28 +122,30 @@ namespace gtsam {
|
||||||
key_(key) {
|
key_(key) {
|
||||||
}
|
}
|
||||||
|
|
||||||
DiscreteKeys Signature::discreteKeysParentsFirst() const {
|
DiscreteKeys Signature::discreteKeys() const {
|
||||||
DiscreteKeys keys;
|
DiscreteKeys keys;
|
||||||
for(const DiscreteKey& key: parents_)
|
|
||||||
keys.push_back(key);
|
|
||||||
keys.push_back(key_);
|
keys.push_back(key_);
|
||||||
|
for (const DiscreteKey& key : parents_) keys.push_back(key);
|
||||||
return keys;
|
return keys;
|
||||||
}
|
}
|
||||||
|
|
||||||
KeyVector Signature::indices() const {
|
KeyVector Signature::indices() const {
|
||||||
KeyVector js;
|
KeyVector js;
|
||||||
js.push_back(key_.first);
|
js.push_back(key_.first);
|
||||||
for(const DiscreteKey& key: parents_)
|
for (const DiscreteKey& key : parents_) js.push_back(key.first);
|
||||||
js.push_back(key.first);
|
|
||||||
return js;
|
return js;
|
||||||
}
|
}
|
||||||
|
|
||||||
vector<double> Signature::cpt() const {
|
vector<double> Signature::cpt() const {
|
||||||
vector<double> cpt;
|
vector<double> cpt;
|
||||||
if (table_) {
|
if (table_) {
|
||||||
for(const Row& row: *table_)
|
const size_t nrStates = table_->at(0).size();
|
||||||
for(const double& x: row)
|
for (size_t j = 0; j < nrStates; j++) {
|
||||||
cpt.push_back(x);
|
for (const Row& row : *table_) {
|
||||||
|
assert(row.size() == nrStates);
|
||||||
|
cpt.push_back(row[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return cpt;
|
return cpt;
|
||||||
}
|
}
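The reordered loop emits the table grouped by the child state first, matching the new discreteKeys() order (child before parents). A hedged sketch of what that means for a small signature; the expected ordering comes from the unit test further down, which checks "0.7 0.8 0.3 0.2" for this conditional:

  // Sketch: CPT layout after this change, with the child state varying slowest.
  DiscreteKey Parent(0, 2), Child(1, 2);
  Signature sig = (Child | Parent = "7/3 8/2");
  std::vector<double> cpt = sig.cpt();
  // cpt == {0.7, 0.8, 0.3, 0.2}: both parent rows for Child=0, then both for Child=1.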
|
||||||
|
|
|
@ -86,8 +86,8 @@ namespace gtsam {
|
||||||
return parents_;
|
return parents_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** All keys, with variable key last */
|
/** All keys, with variable key first */
|
||||||
DiscreteKeys discreteKeysParentsFirst() const;
|
DiscreteKeys discreteKeys() const;
|
||||||
|
|
||||||
/** All key indices, with variable key first */
|
/** All key indices, with variable key first */
|
||||||
KeyVector indices() const;
|
KeyVector indices() const;
|
||||||
|
|
|
@ -132,7 +132,7 @@ TEST(ADT, example3)
|
||||||
|
|
||||||
/** Convert Signature into CPT */
|
/** Convert Signature into CPT */
|
||||||
ADT create(const Signature& signature) {
|
ADT create(const Signature& signature) {
|
||||||
ADT p(signature.discreteKeysParentsFirst(), signature.cpt());
|
ADT p(signature.discreteKeys(), signature.cpt());
|
||||||
static size_t count = 0;
|
static size_t count = 0;
|
||||||
const DiscreteKey& key = signature.key();
|
const DiscreteKey& key = signature.key();
|
||||||
string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str();
|
string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str();
|
||||||
|
@ -181,19 +181,20 @@ TEST(ADT, joint)
|
||||||
dot(joint, "Asia-ASTLBEX");
|
dot(joint, "Asia-ASTLBEX");
|
||||||
joint = apply(joint, pD, &mul);
|
joint = apply(joint, pD, &mul);
|
||||||
dot(joint, "Asia-ASTLBEXD");
|
dot(joint, "Asia-ASTLBEXD");
|
||||||
EXPECT_LONGS_EQUAL(346, (long)muls);
|
EXPECT_LONGS_EQUAL(346, muls);
|
||||||
gttoc_(asiaJoint);
|
gttoc_(asiaJoint);
|
||||||
tictoc_getNode(asiaJointNode, asiaJoint);
|
tictoc_getNode(asiaJointNode, asiaJoint);
|
||||||
elapsed = asiaJointNode->secs() + asiaJointNode->wall();
|
elapsed = asiaJointNode->secs() + asiaJointNode->wall();
|
||||||
tictoc_reset_();
|
tictoc_reset_();
|
||||||
printCounts("Asia joint");
|
printCounts("Asia joint");
|
||||||
|
|
||||||
|
// Form P(A,S,T,L) = P(A) P(S) P(T|A) P(L|S)
|
||||||
ADT pASTL = pA;
|
ADT pASTL = pA;
|
||||||
pASTL = apply(pASTL, pS, &mul);
|
pASTL = apply(pASTL, pS, &mul);
|
||||||
pASTL = apply(pASTL, pT, &mul);
|
pASTL = apply(pASTL, pT, &mul);
|
||||||
pASTL = apply(pASTL, pL, &mul);
|
pASTL = apply(pASTL, pL, &mul);
|
||||||
|
|
||||||
// test combine
|
// test combine to check that P(A) = \sum_{S,T,L} P(A,S,T,L)
|
||||||
ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_);
|
ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_);
|
||||||
EXPECT(assert_equal(pA, fAa));
|
EXPECT(assert_equal(pA, fAa));
|
||||||
ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_);
|
ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_);
|
||||||
|
|
|
@ -18,110 +18,135 @@
|
||||||
|
|
||||||
#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/base/Testable.h>
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
#include <gtsam/base/debug.h>
|
#include <gtsam/base/debug.h>
|
||||||
|
#include <gtsam/base/Testable.h>
|
||||||
|
#include <gtsam/base/Vector.h>
|
||||||
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
|
|
||||||
#include <boost/assign/std/map.hpp>
|
|
||||||
#include <boost/assign/list_inserter.hpp>
|
#include <boost/assign/list_inserter.hpp>
|
||||||
|
#include <boost/assign/std/map.hpp>
|
||||||
|
|
||||||
using namespace boost::assign;
|
using namespace boost::assign;
|
||||||
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST(DiscreteBayesNet, Asia)
|
TEST(DiscreteBayesNet, bayesNet) {
|
||||||
{
|
DiscreteBayesNet bayesNet;
|
||||||
|
DiscreteKey Parent(0, 2), Child(1, 2);
|
||||||
|
|
||||||
|
auto prior = boost::make_shared<DiscreteConditional>(Parent % "6/4");
|
||||||
|
CHECK(assert_equal(Potentials::ADT({Parent}, "0.6 0.4"),
|
||||||
|
(Potentials::ADT)*prior));
|
||||||
|
bayesNet.push_back(prior);
|
||||||
|
|
||||||
|
auto conditional =
|
||||||
|
boost::make_shared<DiscreteConditional>(Child | Parent = "7/3 8/2");
|
||||||
|
EXPECT_LONGS_EQUAL(1, *(conditional->beginFrontals()));
|
||||||
|
Potentials::ADT expected(Child & Parent, "0.7 0.8 0.3 0.2");
|
||||||
|
CHECK(assert_equal(expected, (Potentials::ADT)*conditional));
|
||||||
|
bayesNet.push_back(conditional);
|
||||||
|
|
||||||
|
DiscreteFactorGraph fg(bayesNet);
|
||||||
|
LONGS_EQUAL(2, fg.back()->size());
|
||||||
|
|
||||||
|
// Check the marginals
|
||||||
|
const double expectedMarginal[2]{0.4, 0.6 * 0.3 + 0.4 * 0.2};
|
||||||
|
DiscreteMarginals marginals(fg);
|
||||||
|
for (size_t j = 0; j < 2; j++) {
|
||||||
|
Vector FT = marginals.marginalProbabilities(DiscreteKey(j, 2));
|
||||||
|
EXPECT_DOUBLES_EQUAL(expectedMarginal[j], FT[1], 1e-3);
|
||||||
|
EXPECT_DOUBLES_EQUAL(FT[0], 1.0 - FT[1], 1e-9);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
TEST(DiscreteBayesNet, Asia) {
|
||||||
DiscreteBayesNet asia;
|
DiscreteBayesNet asia;
|
||||||
// DiscreteKey A("Asia"), S("Smoking"), T("Tuberculosis"), L("LungCancer"), B(
|
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
|
||||||
// "Bronchitis"), E("Either"), X("XRay"), D("Dyspnoea");
|
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||||
DiscreteKey A(0,2), S(4,2), T(3,2), L(6,2), B(7,2), E(5,2), X(2,2), D(1,2);
|
|
||||||
|
|
||||||
// TODO: make a version that doesn't use the parser
|
asia.add(Asia % "99/1");
|
||||||
asia.add(A % "99/1");
|
asia.add(Smoking % "50/50");
|
||||||
asia.add(S % "50/50");
|
|
||||||
|
|
||||||
asia.add(T | A = "99/1 95/5");
|
asia.add(Tuberculosis | Asia = "99/1 95/5");
|
||||||
asia.add(L | S = "99/1 90/10");
|
asia.add(LungCancer | Smoking = "99/1 90/10");
|
||||||
asia.add(B | S = "70/30 40/60");
|
asia.add(Bronchitis | Smoking = "70/30 40/60");
|
||||||
|
|
||||||
asia.add((E | T, L) = "F T T T");
|
asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||||
|
|
||||||
asia.add(X | E = "95/5 2/98");
|
asia.add(XRay | Either = "95/5 2/98");
|
||||||
// next lines are same as asia.add((D | E, B) = "9/1 2/8 3/7 1/9");
|
asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
|
||||||
DiscreteConditional::shared_ptr actual =
|
|
||||||
boost::make_shared<DiscreteConditional>((D | E, B) = "9/1 2/8 3/7 1/9");
|
|
||||||
asia.push_back(actual);
|
|
||||||
// GTSAM_PRINT(asia);
|
|
||||||
|
|
||||||
// Convert to factor graph
|
// Convert to factor graph
|
||||||
DiscreteFactorGraph fg(asia);
|
DiscreteFactorGraph fg(asia);
|
||||||
// GTSAM_PRINT(fg);
|
LONGS_EQUAL(3, fg.back()->size());
|
||||||
LONGS_EQUAL(3,fg.back()->size());
|
|
||||||
Potentials::ADT expected(B & D & E, "0.9 0.3 0.1 0.7 0.2 0.1 0.8 0.9");
|
// Check the marginals we know (of the parent-less nodes)
|
||||||
CHECK(assert_equal(expected,(Potentials::ADT)*actual));
|
DiscreteMarginals marginals(fg);
|
||||||
|
Vector2 va(0.99, 0.01), vs(0.5, 0.5);
|
||||||
|
EXPECT(assert_equal(va, marginals.marginalProbabilities(Asia)));
|
||||||
|
EXPECT(assert_equal(vs, marginals.marginalProbabilities(Smoking)));
|
||||||
|
|
||||||
// Create solver and eliminate
|
// Create solver and eliminate
Ordering ordering;
-ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7);
+ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
-// GTSAM_PRINT(*chordal);
-DiscreteConditional expected2(B % "11/9");
-CHECK(assert_equal(expected2,*chordal->back()));
+DiscreteConditional expected2(Bronchitis % "11/9");
+EXPECT(assert_equal(expected2, *chordal->back()));

// solve
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
DiscreteFactor::Values expectedMPE;
-insert(expectedMPE)(A.first, 0)(D.first, 0)(X.first, 0)(T.first, 0)(S.first,
-    0)(E.first, 0)(L.first, 0)(B.first, 0);
+insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)(
+    Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)(
+    LungCancer.first, 0)(Bronchitis.first, 0);
EXPECT(assert_equal(expectedMPE, *actualMPE));

-// add evidence, we were in Asia and we have Dispnoea
-fg.add(A, "0 1");
-fg.add(D, "0 1");
-// fg.product().dot("fg");
+// add evidence, we were in Asia and we have dyspnea
+fg.add(Asia, "0 1");
+fg.add(Dyspnea, "0 1");

// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
-// GTSAM_PRINT(*chordal2);
DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
DiscreteFactor::Values expectedMPE2;
-insert(expectedMPE2)(A.first, 1)(D.first, 1)(X.first, 0)(T.first, 0)(S.first,
-    1)(E.first, 0)(L.first, 0)(B.first, 1);
+insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)(
+    Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)(
+    LungCancer.first, 0)(Bronchitis.first, 1);
EXPECT(assert_equal(expectedMPE2, *actualMPE2));

// now sample from it
DiscreteFactor::Values expectedSample;
SETDEBUG("DiscreteConditional::sample", false);
-insert(expectedSample)(A.first, 1)(D.first, 1)(X.first, 1)(T.first, 0)(
-    S.first, 1)(E.first, 1)(L.first, 1)(B.first, 0);
+insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)(
+    Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)(
+    LungCancer.first, 1)(Bronchitis.first, 0);
DiscreteFactor::sharedValues actualSample = chordal2->sample();
EXPECT(assert_equal(expectedSample, *actualSample));
}

/* ************************************************************************* */
-TEST_UNSAFE(DiscreteBayesNet, Sugar)
-{
-DiscreteKey T(0,2), L(1,2), E(2,2), D(3,2), C(8,3), S(7,2);
+TEST_UNSAFE(DiscreteBayesNet, Sugar) {
+DiscreteKey T(0, 2), L(1, 2), E(2, 2), C(8, 3), S(7, 2);

DiscreteBayesNet bn;

-// test some mistakes
-// add(bn, D);
-// add(bn, D | E);
-// add(bn, D | E = "blah");

// try logic
bn.add((E | T, L) = "OR");
bn.add((E | T, L) = "AND");

-// // try multivalued
+// try multivalued
bn.add(C % "1/1/2");
bn.add(C | S = "1/1/2 5/2/3");
}

/* ************************************************************************* */

@ -130,4 +155,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
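Note on the evidence lines in the hunk above: fg.add(Asia, "0 1") multiplies an indicator factor into the graph, so re-eliminating gives the posterior given Asia = 1. A minimal sketch using the same calls (the key index 0 is hypothetical; in the test the keys are declared earlier in the file):

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteKey.h>
using namespace gtsam;

void addEvidenceSketch() {
  DiscreteKey Asia(0, 2);   // binary variable, hypothetical key index
  DiscreteFactorGraph fg;
  fg.add(Asia, "0 1");      // weight 0 for Asia=0, weight 1 for Asia=1
  // Every assignment with Asia=0 now has probability zero, so optimize()
  // and sample() on the re-eliminated Bayes net respect the evidence.
}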
@ -1,261 +1,228 @@
|
||||||
///* ----------------------------------------------------------------------------
|
/* ----------------------------------------------------------------------------
|
||||||
//
|
|
||||||
// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||||
// * Atlanta, Georgia 30332-0415
|
* Atlanta, Georgia 30332-0415
|
||||||
// * All Rights Reserved
|
* All Rights Reserved
|
||||||
// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
//
|
|
||||||
// * See LICENSE for the license information
|
* See LICENSE for the license information
|
||||||
//
|
|
||||||
// * -------------------------------------------------------------------------- */
|
* -------------------------------------------------------------------------- */
|
||||||
//
|
|
||||||
///*
|
/*
|
||||||
// * @file testDiscreteBayesTree.cpp
|
* @file testDiscreteBayesTree.cpp
|
||||||
// * @date sept 15, 2012
|
* @date sept 15, 2012
|
||||||
// * @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
// */
|
*/
|
||||||
//
|
|
||||||
//#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/base/Vector.h>
|
||||||
//#include <gtsam/discrete/DiscreteBayesTree.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
//#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteBayesTree.h>
|
||||||
//
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
//#include <boost/assign/std/vector.hpp>
|
#include <gtsam/inference/BayesNet.h>
|
||||||
//using namespace boost::assign;
|
|
||||||
//
|
#include <boost/assign/std/vector.hpp>
|
||||||
|
using namespace boost::assign;
|
||||||
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
//
|
|
||||||
//using namespace std;
|
#include <vector>
|
||||||
//using namespace gtsam;
|
|
||||||
//
|
using namespace std;
|
||||||
//static bool debug = false;
|
using namespace gtsam;
|
||||||
//
|
|
||||||
///**
|
static bool debug = false;
|
||||||
// * Custom clique class to debug shortcuts
|
|
||||||
// */
|
/* ************************************************************************* */
|
||||||
////class Clique: public BayesTreeCliqueBaseOrdered<Clique, DiscreteConditional> {
|
|
||||||
////
|
TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
||||||
////protected:
|
const int nrNodes = 15;
|
||||||
////
|
const size_t nrStates = 2;
|
||||||
////public:
|
|
||||||
////
|
// define variables
|
||||||
//// typedef BayesTreeCliqueBaseOrdered<Clique, DiscreteConditional> Base;
|
vector<DiscreteKey> key;
|
||||||
//// typedef boost::shared_ptr<Clique> shared_ptr;
|
for (int i = 0; i < nrNodes; i++) {
|
||||||
////
|
DiscreteKey key_i(i, nrStates);
|
||||||
//// // Constructors
|
key.push_back(key_i);
|
||||||
//// Clique() {
|
}
|
||||||
//// }
|
|
||||||
//// Clique(const DiscreteConditional::shared_ptr& conditional) :
|
// create a thin-tree Bayesnet, a la Jean-Guillaume
|
||||||
//// Base(conditional) {
|
DiscreteBayesNet bayesNet;
|
||||||
//// }
|
bayesNet.add(key[14] % "1/3");
|
||||||
//// Clique(
|
|
||||||
//// const std::pair<DiscreteConditional::shared_ptr,
|
bayesNet.add(key[13] | key[14] = "1/3 3/1");
|
||||||
//// DiscreteConditional::FactorType::shared_ptr>& result) :
|
bayesNet.add(key[12] | key[14] = "3/1 3/1");
|
||||||
//// Base(result) {
|
|
||||||
//// }
|
bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
|
||||||
////
|
bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
|
||||||
//// /// print index signature only
|
bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
|
||||||
//// void printSignature(const std::string& s = "Clique: ",
|
bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");
|
||||||
//// const KeyFormatter& indexFormatter = DefaultKeyFormatter) const {
|
|
||||||
//// ((IndexConditionalOrdered::shared_ptr) conditional_)->print(s, indexFormatter);
|
bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
|
||||||
//// }
|
bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
|
||||||
////
|
bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
|
||||||
//// /// evaluate value of sub-tree
|
bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");
|
||||||
//// double evaluate(const DiscreteConditional::Values & values) {
|
|
||||||
//// double result = (*(this->conditional_))(values);
|
bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
|
||||||
//// // evaluate all children and multiply into result
|
bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
|
||||||
//// for(boost::shared_ptr<Clique> c: children_)
|
bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
|
||||||
//// result *= c->evaluate(values);
|
bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");
|
||||||
//// return result;
|
|
||||||
//// }
|
if (debug) {
|
||||||
////
|
GTSAM_PRINT(bayesNet);
|
||||||
////};
|
bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
||||||
//
|
}
|
||||||
////typedef BayesTreeOrdered<DiscreteConditional, Clique> DiscreteBayesTree;
|
|
||||||
////
|
// create a BayesTree out of a Bayes net
|
||||||
/////* ************************************************************************* */
|
auto bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal();
|
||||||
////double evaluate(const DiscreteBayesTree& tree,
|
if (debug) {
|
||||||
//// const DiscreteConditional::Values & values) {
|
GTSAM_PRINT(*bayesTree);
|
||||||
//// return tree.root()->evaluate(values);
|
bayesTree->saveGraph("/tmp/discreteBayesTree.dot");
|
||||||
////}
|
}
|
||||||
//
|
|
||||||
///* ************************************************************************* */
|
// Check frontals and parents
|
||||||
//
|
for (size_t i : {13, 14, 9, 3, 2, 8, 1, 0, 10, 5, 4}) {
|
||||||
//TEST_UNSAFE( DiscreteBayesTree, thinTree ) {
|
auto clique_i = (*bayesTree)[i];
|
||||||
//
|
EXPECT_LONGS_EQUAL(i, *(clique_i->conditional_->beginFrontals()));
|
||||||
// const int nrNodes = 15;
|
}
|
||||||
// const size_t nrStates = 2;
|
|
||||||
//
|
auto R = bayesTree->roots().front();
|
||||||
// // define variables
|
|
||||||
// vector<DiscreteKey> key;
|
// Check whether BN and BT give the same answer on all configurations
|
||||||
// for (int i = 0; i < nrNodes; i++) {
|
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
||||||
// DiscreteKey key_i(i, nrStates);
|
key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] &
|
||||||
// key.push_back(key_i);
|
key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
|
||||||
// }
|
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||||
//
|
DiscreteFactor::Values x = allPosbValues[i];
|
||||||
// // create a thin-tree Bayesnet, a la Jean-Guillaume
|
double expected = bayesNet.evaluate(x);
|
||||||
// DiscreteBayesNet bayesNet;
|
double actual = bayesTree->evaluate(x);
|
||||||
// bayesNet.add(key[14] % "1/3");
|
DOUBLES_EQUAL(expected, actual, 1e-9);
|
||||||
//
|
}
|
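The loop above checks the standard factorization identity: the Bayes net and the Bayes tree built from it encode the same joint,

\[
P(x_0,\ldots,x_{14}) \;=\; \prod_i P(x_i \mid \Pi_i) \;=\; \prod_{\text{cliques } C} P(F_C \mid S_C),
\]

where \Pi_i are the parents in the DiscreteBayesNet and F_C, S_C are the frontal and separator variables of clique C; bayesNet.evaluate(x) and bayesTree->evaluate(x) compute the two products.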
||||||
// bayesNet.add(key[13] | key[14] = "1/3 3/1");
|
|
||||||
// bayesNet.add(key[12] | key[14] = "3/1 3/1");
|
// Calculate all some marginals for Values==all1
|
||||||
//
|
Vector marginals = Vector::Zero(15);
|
||||||
// bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
|
double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
|
||||||
// bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
|
joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
|
||||||
// bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
|
joint_4_11 = 0, joint_11_13 = 0, joint_11_13_14 = 0,
|
||||||
// bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");
|
joint_11_12_13_14 = 0, joint_9_11_12_13 = 0, joint_8_11_12_13 = 0;
|
||||||
//
|
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||||
// bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
|
DiscreteFactor::Values x = allPosbValues[i];
|
||||||
// bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
|
double px = bayesTree->evaluate(x);
|
||||||
// bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
|
for (size_t i = 0; i < 15; i++)
|
||||||
// bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");
|
if (x[i]) marginals[i] += px;
|
||||||
//
|
if (x[12] && x[14]) {
|
||||||
// bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
|
joint_12_14 += px;
|
||||||
// bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
|
if (x[9]) joint_9_12_14 += px;
|
||||||
// bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
|
if (x[8]) joint_8_12_14 += px;
|
||||||
// bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");
|
}
|
||||||
//
|
if (x[8] && x[12]) joint_8_12 += px;
|
||||||
//// if (debug) {
|
if (x[2]) {
|
||||||
//// GTSAM_PRINT(bayesNet);
|
if (x[8]) joint82 += px;
|
||||||
//// bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
if (x[1]) joint12 += px;
|
||||||
//// }
|
}
|
||||||
//
|
if (x[4]) {
|
||||||
// // create a BayesTree out of a Bayes net
|
if (x[2]) joint24 += px;
|
||||||
// DiscreteBayesTree bayesTree(bayesNet);
|
if (x[5]) joint45 += px;
|
||||||
// if (debug) {
|
if (x[6]) joint46 += px;
|
||||||
// GTSAM_PRINT(bayesTree);
|
if (x[11]) joint_4_11 += px;
|
||||||
// bayesTree.saveGraph("/tmp/discreteBayesTree.dot");
|
}
|
||||||
// }
|
if (x[11] && x[13]) {
|
||||||
//
|
joint_11_13 += px;
|
||||||
// // Check whether BN and BT give the same answer on all configurations
|
if (x[8] && x[12]) joint_8_11_12_13 += px;
|
||||||
// // Also calculate all some marginals
|
if (x[9] && x[12]) joint_9_11_12_13 += px;
|
||||||
// Vector marginals = zero(15);
|
if (x[14]) {
|
||||||
// double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
|
joint_11_13_14 += px;
|
||||||
// joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
|
if (x[12]) {
|
||||||
// joint_4_11 = 0;
|
joint_11_12_13_14 += px;
|
||||||
// vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
}
|
||||||
// key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7]
|
}
|
||||||
// & key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
|
}
|
||||||
// for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
}
|
||||||
// DiscreteFactor::Values x = allPosbValues[i];
|
DiscreteFactor::Values all1 = allPosbValues.back();
|
||||||
// double expected = evaluate(bayesNet, x);
|
|
||||||
// double actual = evaluate(bayesTree, x);
|
// check separator marginal P(S0)
|
||||||
// DOUBLES_EQUAL(expected, actual, 1e-9);
|
auto clique = (*bayesTree)[0];
|
||||||
// // collect marginals
|
DiscreteFactorGraph separatorMarginal0 =
|
||||||
// for (size_t i = 0; i < 15; i++)
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// if (x[i])
|
DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);
|
||||||
// marginals[i] += actual;
|
|
||||||
// // calculate shortcut 8 and 0
|
// check separator marginal P(S9), should be P(14)
|
||||||
// if (x[12] && x[14])
|
clique = (*bayesTree)[9];
|
||||||
// joint_12_14 += actual;
|
DiscreteFactorGraph separatorMarginal9 =
|
||||||
// if (x[9] && x[12] & x[14])
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// joint_9_12_14 += actual;
|
DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);
|
||||||
// if (x[8] && x[12] & x[14])
|
|
||||||
// joint_8_12_14 += actual;
|
// check separator marginal of root, should be empty
|
||||||
// if (x[8] && x[12])
|
clique = (*bayesTree)[11];
|
||||||
// joint_8_12 += actual;
|
DiscreteFactorGraph separatorMarginal11 =
|
||||||
// if (x[8] && x[2])
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// joint82 += actual;
|
LONGS_EQUAL(0, separatorMarginal11.size());
|
||||||
// if (x[1] && x[2])
|
|
||||||
// joint12 += actual;
|
// check shortcut P(S9||R) to root
|
||||||
// if (x[2] && x[4])
|
clique = (*bayesTree)[9];
|
||||||
// joint24 += actual;
|
DiscreteBayesNet shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// if (x[4] && x[5])
|
LONGS_EQUAL(1, shortcut.size());
|
||||||
// joint45 += actual;
|
DOUBLES_EQUAL(joint_11_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// if (x[4] && x[6])
|
|
||||||
// joint46 += actual;
|
// check shortcut P(S8||R) to root
|
||||||
// if (x[4] && x[11])
|
clique = (*bayesTree)[8];
|
||||||
// joint_4_11 += actual;
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// }
|
DOUBLES_EQUAL(joint_11_12_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// DiscreteFactor::Values all1 = allPosbValues.back();
|
|
||||||
//
|
// check shortcut P(S2||R) to root
|
||||||
// Clique::shared_ptr R = bayesTree.root();
|
clique = (*bayesTree)[2];
|
||||||
//
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// // check separator marginal P(S0)
|
DOUBLES_EQUAL(joint_9_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// Clique::shared_ptr c = bayesTree[0];
|
|
||||||
// DiscreteFactorGraph separatorMarginal0 = c->separatorMarginal(R,
|
// check shortcut P(S0||R) to root
|
||||||
// EliminateDiscrete);
|
clique = (*bayesTree)[0];
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
//
|
DOUBLES_EQUAL(joint_8_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// // check separator marginal P(S9), should be P(14)
|
|
||||||
// c = bayesTree[9];
|
// calculate all shortcuts to root
|
||||||
// DiscreteFactorGraph separatorMarginal9 = c->separatorMarginal(R,
|
DiscreteBayesTree::Nodes cliques = bayesTree->nodes();
|
||||||
// EliminateDiscrete);
|
for (auto clique : cliques) {
|
||||||
// EXPECT_DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);
|
DiscreteBayesNet shortcut = clique.second->shortcut(R, EliminateDiscrete);
|
||||||
//
|
if (debug) {
|
||||||
// // check separator marginal of root, should be empty
|
clique.second->conditional_->printSignature();
|
||||||
// c = bayesTree[11];
|
shortcut.print("shortcut:");
|
||||||
// DiscreteFactorGraph separatorMarginal11 = c->separatorMarginal(R,
|
}
|
||||||
// EliminateDiscrete);
|
}
|
||||||
// EXPECT_LONGS_EQUAL(0, separatorMarginal11.size());
|
|
||||||
//
|
// Check all marginals
|
||||||
// // check shortcut P(S9||R) to root
|
DiscreteFactor::shared_ptr marginalFactor;
|
||||||
// c = bayesTree[9];
|
for (size_t i = 0; i < 15; i++) {
|
||||||
// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete);
|
marginalFactor = bayesTree->marginalFactor(i, EliminateDiscrete);
|
||||||
// EXPECT_LONGS_EQUAL(0, shortcut.size());
|
double actual = (*marginalFactor)(all1);
|
||||||
//
|
DOUBLES_EQUAL(marginals[i], actual, 1e-9);
|
||||||
// // check shortcut P(S8||R) to root
|
}
|
||||||
// c = bayesTree[8];
|
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
DiscreteBayesNet::shared_ptr actualJoint;
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_12_14/marginals[14], evaluate(shortcut,all1),
|
|
||||||
// 1e-9);
|
// Check joint P(8, 2)
|
||||||
//
|
actualJoint = bayesTree->jointBayesNet(8, 2, EliminateDiscrete);
|
||||||
// // check shortcut P(S2||R) to root
|
DOUBLES_EQUAL(joint82, actualJoint->evaluate(all1), 1e-9);
|
||||||
// c = bayesTree[2];
|
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
// Check joint P(1, 2)
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_9_12_14/marginals[14], evaluate(shortcut,all1),
|
actualJoint = bayesTree->jointBayesNet(1, 2, EliminateDiscrete);
|
||||||
// 1e-9);
|
DOUBLES_EQUAL(joint12, actualJoint->evaluate(all1), 1e-9);
|
||||||
//
|
|
||||||
// // check shortcut P(S0||R) to root
|
// Check joint P(2, 4)
|
||||||
// c = bayesTree[0];
|
actualJoint = bayesTree->jointBayesNet(2, 4, EliminateDiscrete);
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
DOUBLES_EQUAL(joint24, actualJoint->evaluate(all1), 1e-9);
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_8_12_14/marginals[14], evaluate(shortcut,all1),
|
|
||||||
// 1e-9);
|
// Check joint P(4, 5)
|
||||||
//
|
actualJoint = bayesTree->jointBayesNet(4, 5, EliminateDiscrete);
|
||||||
// // calculate all shortcuts to root
|
DOUBLES_EQUAL(joint45, actualJoint->evaluate(all1), 1e-9);
|
||||||
// DiscreteBayesTree::Nodes cliques = bayesTree.nodes();
|
|
||||||
// for(Clique::shared_ptr c: cliques) {
|
// Check joint P(4, 6)
|
||||||
// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete);
|
actualJoint = bayesTree->jointBayesNet(4, 6, EliminateDiscrete);
|
||||||
// if (debug) {
|
DOUBLES_EQUAL(joint46, actualJoint->evaluate(all1), 1e-9);
|
||||||
// c->printSignature();
|
|
||||||
// shortcut.print("shortcut:");
|
// Check joint P(4, 11)
|
||||||
// }
|
actualJoint = bayesTree->jointBayesNet(4, 11, EliminateDiscrete);
|
||||||
// }
|
DOUBLES_EQUAL(joint_4_11, actualJoint->evaluate(all1), 1e-9);
|
||||||
//
|
}
|
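Reading off the assertions in this test, the accumulated joints make the expected shortcut values explicit; for clique 9, at the all-ones assignment,

\[
\frac{\text{joint\_11\_13\_14}}{\text{joint\_11\_13}}
\;=\; P(x_{14}=1 \mid x_{11}=1,\, x_{13}=1),
\]

and likewise the clique 8, 2 and 0 shortcuts evaluate to P(x_{12}, x_{14} \mid x_{11}, x_{13}), P(x_{9}, x_{12} \mid x_{11}, x_{13}) and P(x_{8}, x_{12} \mid x_{11}, x_{13}) at the same assignment, while the separator marginal of clique 9 reduces to the single marginal P(x_{14}).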
||||||
// // Check all marginals
|
|
||||||
// DiscreteFactor::shared_ptr marginalFactor;
|
|
||||||
// for (size_t i = 0; i < 15; i++) {
|
|
||||||
// marginalFactor = bayesTree.marginalFactor(i, EliminateDiscrete);
|
|
||||||
// double actual = (*marginalFactor)(all1);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(marginals[i], actual, 1e-9);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DiscreteBayesNet::shared_ptr actualJoint;
|
|
||||||
//
|
|
||||||
// // Check joint P(8,2) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(8, 2, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint82, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(1,2) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(1, 2, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint12, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(2,4)
|
|
||||||
// actualJoint = bayesTree.jointBayesNet(2, 4, EliminateDiscrete);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(joint24, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,5) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(4, 5, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,6) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(4, 6, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,11)
|
|
||||||
// actualJoint = bayesTree.jointBayesNet(4, 11, EliminateDiscrete);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_4_11, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
//}
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
int main() {
|
int main() {
|
||||||
|
@ -263,4 +230,3 @@ int main() {
|
||||||
return TestRegistry::runAllTests(tr);
|
return TestRegistry::runAllTests(tr);
|
||||||
}
|
}
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
|
|
@ -16,9 +16,9 @@
* @date Feb 14, 2011
*/

-#include <boost/make_shared.hpp>
#include <boost/assign/std/map.hpp>
#include <boost/assign/std/vector.hpp>
+#include <boost/make_shared.hpp>
using namespace boost::assign;

#include <CppUnitLite/TestHarness.h>

@ -36,6 +36,11 @@ TEST( DiscreteConditional, constructors)
DiscreteConditional::shared_ptr expected1 = //
boost::make_shared<DiscreteConditional>(X | Y = "1/1 2/3 1/4");
EXPECT(expected1);
+EXPECT_LONGS_EQUAL(0, *(expected1->beginFrontals()));
+EXPECT_LONGS_EQUAL(2, *(expected1->beginParents()));
+EXPECT(expected1->endParents() == expected1->end());
+EXPECT(expected1->endFrontals() == expected1->beginParents());
+
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
DiscreteConditional actual1(1, f1);
EXPECT(assert_equal(*expected1, actual1, 1e-9));

@ -43,71 +48,68 @@ TEST( DiscreteConditional, constructors)
DecisionTreeFactor f2(X & Y & Z,
"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
DiscreteConditional actual2(1, f2);
-DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor();
-// EXPECT(assert_equal(f2, *actual2factor, 1e-9));
+EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
}

/* ************************************************************************* */
-TEST( DiscreteConditional, constructors_alt_interface)
-{
-DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering !
+TEST(DiscreteConditional, constructors_alt_interface) {
+DiscreteKey X(0, 2), Y(2, 3), Z(1, 2);  // watch ordering !

Signature::Table table;
Signature::Row r1, r2, r3;
-r1 += 1.0, 1.0; r2 += 2.0, 3.0; r3 += 1.0, 4.0;
+r1 += 1.0, 1.0;
+r2 += 2.0, 3.0;
+r3 += 1.0, 4.0;
table += r1, r2, r3;
-DiscreteConditional::shared_ptr expected1 = //
-boost::make_shared<DiscreteConditional>(X | Y = table);
-EXPECT(expected1);
+auto actual1 = boost::make_shared<DiscreteConditional>(X | Y = table);
+EXPECT(actual1);
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
-DiscreteConditional actual1(1, f1);
-EXPECT(assert_equal(*expected1, actual1, 1e-9));
+DiscreteConditional expected1(1, f1);
+EXPECT(assert_equal(expected1, *actual1, 1e-9));

-DecisionTreeFactor f2(X & Y & Z,
-"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
+DecisionTreeFactor f2(
+    X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
DiscreteConditional actual2(1, f2);
-DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor();
-// EXPECT(assert_equal(f2, *actual2factor, 1e-9));
+EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
}

/* ************************************************************************* */
-TEST( DiscreteConditional, constructors2)
-{
+TEST(DiscreteConditional, constructors2) {
// Declare keys and ordering
-DiscreteKey C(0,2), B(1,2);
-DecisionTreeFactor expected(C & B, "0.8 0.75 0.2 0.25");
+DiscreteKey C(0, 2), B(1, 2);
+DecisionTreeFactor actual(C & B, "0.8 0.75 0.2 0.25");
Signature signature((C | B) = "4/1 3/1");
-DiscreteConditional actual(signature);
-DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor();
-EXPECT(assert_equal(expected, *actualFactor));
+DiscreteConditional expected(signature);
+DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
+EXPECT(assert_equal(*expectedFactor, actual));
}

/* ************************************************************************* */
-TEST( DiscreteConditional, constructors3)
-{
+TEST(DiscreteConditional, constructors3) {
// Declare keys and ordering
-DiscreteKey C(0,2), B(1,2), A(2,2);
-DecisionTreeFactor expected(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8");
+DiscreteKey C(0, 2), B(1, 2), A(2, 2);
+DecisionTreeFactor actual(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8");
Signature signature((C | B, A) = "4/1 1/1 1/1 1/4");
-DiscreteConditional actual(signature);
-DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor();
-EXPECT(assert_equal(expected, *actualFactor));
+DiscreteConditional expected(signature);
+DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
+EXPECT(assert_equal(*expectedFactor, actual));
}

/* ************************************************************************* */
-TEST( DiscreteConditional, Combine) {
+TEST(DiscreteConditional, Combine) {
DiscreteKey A(0, 2), B(1, 2);
vector<DiscreteConditional::shared_ptr> c;
c.push_back(boost::make_shared<DiscreteConditional>(A | B = "1/2 2/1"));
c.push_back(boost::make_shared<DiscreteConditional>(B % "1/2"));
DecisionTreeFactor factor(A & B, "0.111111 0.444444 0.222222 0.222222");
-DiscreteConditional expected(2, factor);
-DiscreteConditional::shared_ptr actual = DiscreteConditional::Combine(
-    c.begin(), c.end());
-EXPECT(assert_equal(expected, *actual,1e-5));
+DiscreteConditional actual(2, factor);
+auto expected = DiscreteConditional::Combine(c.begin(), c.end());
+EXPECT(assert_equal(*expected, actual, 1e-5));
}

/* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+TestResult tr;
+return TestRegistry::runAllTests(tr);
+}
/* ************************************************************************* */
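The new assertion on actual2.toFactor() spells out what a single-frontal DiscreteConditional stores: the factor renormalized per parent assignment,

\[
P(X \mid Y, Z) \;=\; \frac{f(X,Y,Z)}{\sum_X f(X,Y,Z)},
\]

which, assuming sum(1) sums out the one frontal variable, is exactly what f2 / *f2.sum(1) builds.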
@ -19,6 +19,7 @@
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteEliminationTree.h>
#include <gtsam/discrete/DiscreteBayesTree.h>
+#include <gtsam/inference/BayesNet.h>

#include <CppUnitLite/TestHarness.h>

@ -146,8 +146,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) {

/* ************************************************************************* */
// Second truss example with non-trivial factors
-TEST_UNSAFE( DiscreteMarginals, truss2 ) {
+TEST_UNSAFE(DiscreteMarginals, truss2) {

const int nrNodes = 5;
const size_t nrStates = 2;

@ -160,40 +159,39 @@ TEST_UNSAFE( DiscreteMarginals, truss2 ) {

// create graph and add three truss potentials
DiscreteFactorGraph graph;
-graph.add(key[0] & key[2] & key[4],"1 2 3 4 5 6 7 8");
-graph.add(key[1] & key[3] & key[4],"1 2 3 4 5 6 7 8");
-graph.add(key[2] & key[3] & key[4],"1 2 3 4 5 6 7 8");
+graph.add(key[0] & key[2] & key[4], "1 2 3 4 5 6 7 8");
+graph.add(key[1] & key[3] & key[4], "1 2 3 4 5 6 7 8");
+graph.add(key[2] & key[3] & key[4], "1 2 3 4 5 6 7 8");

// Calculate the marginals by brute force
-vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
-    key[0] & key[1] & key[2] & key[3] & key[4]);
+vector<DiscreteFactor::Values> allPosbValues =
+    cartesianProduct(key[0] & key[1] & key[2] & key[3] & key[4]);
Vector T = Z_5x1, F = Z_5x1;
for (size_t i = 0; i < allPosbValues.size(); ++i) {
DiscreteFactor::Values x = allPosbValues[i];
double px = graph(x);
-for (size_t j=0;j<5;j++)
-if (x[j]) T[j]+=px; else F[j]+=px;
-// cout << x[0] << " " << x[1] << " "<< x[2] << " " << x[3] << " " << x[4] << " :\t" << px << endl;
+for (size_t j = 0; j < 5; j++)
+if (x[j])
+    T[j] += px;
+else
+    F[j] += px;
}

// Check all marginals given by a sequential solver and Marginals
// DiscreteSequentialSolver solver(graph);
DiscreteMarginals marginals(graph);
-for (size_t j=0;j<5;j++) {
-double sum = T[j]+F[j];
-T[j]/=sum;
-F[j]/=sum;
+for (size_t j = 0; j < 5; j++) {
+double sum = T[j] + F[j];
+T[j] /= sum;
+F[j] /= sum;

-// // solver
-// Vector actualV = solver.marginalProbabilities(key[j]);
-// EXPECT(assert_equal((Vector(2) << F[j], T[j]), actualV));

// Marginals
vector<double> table;
-table += F[j],T[j];
-DecisionTreeFactor expectedM(key[j],table);
+table += F[j], T[j];
+DecisionTreeFactor expectedM(key[j], table);
DiscreteFactor::shared_ptr actualM = marginals(j);
-EXPECT(assert_equal(expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
+EXPECT(assert_equal(
+    expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
}
}
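The brute-force check in truss2 computes, for each node j,

\[
T_j \;=\; \sum_{x:\,x_j=1} \prod_k f_k(x), \qquad
F_j \;=\; \sum_{x:\,x_j=0} \prod_k f_k(x), \qquad
P(x_j=1) \;=\; \frac{T_j}{T_j+F_j},
\]

with graph(x) supplying the unnormalized product of factors, and then compares the normalized pair (F_j, T_j) against the DecisionTreeFactor returned by marginals(j).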
@ -11,36 +11,43 @@

/**
* @file testSignature
-* @brief Tests focusing on the details of Signatures to evaluate boost compliance
+* @brief Tests focusing on the details of Signatures to evaluate boost
+* compliance
* @author Alex Cunningham
* @date Sept 19th 2011
*/

-#include <boost/assign/std/vector.hpp>
#include <CppUnitLite/TestHarness.h>

#include <gtsam/base/Testable.h>
#include <gtsam/discrete/Signature.h>

+#include <boost/assign/std/vector.hpp>
+#include <vector>

using namespace std;
using namespace gtsam;
using namespace boost::assign;

-DiscreteKey X(0,2), Y(1,3), Z(2,2);
+DiscreteKey X(0, 2), Y(1, 3), Z(2, 2);

/* ************************************************************************* */
TEST(testSignature, simple_conditional) {
Signature sig(X | Y = "1/1 2/3 1/4");
+Signature::Table table = *sig.table();
+vector<double> row[3]{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}};
+CHECK(row[0] == table[0]);
+CHECK(row[1] == table[1]);
+CHECK(row[2] == table[2]);
DiscreteKey actKey = sig.key();
-LONGS_EQUAL((long)X.first, (long)actKey.first);
+LONGS_EQUAL(X.first, actKey.first);

-DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-LONGS_EQUAL(2, (long)actKeys.size());
-LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
-LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
+DiscreteKeys actKeys = sig.discreteKeys();
+LONGS_EQUAL(2, actKeys.size());
+LONGS_EQUAL(X.first, actKeys.front().first);
+LONGS_EQUAL(Y.first, actKeys.back().first);

vector<double> actCpt = sig.cpt();
-EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
+EXPECT_LONGS_EQUAL(6, actCpt.size());
}

/* ************************************************************************* */

@ -54,17 +61,20 @@ TEST(testSignature, simple_conditional_nonparser) {

Signature sig(X | Y = table);
DiscreteKey actKey = sig.key();
-EXPECT_LONGS_EQUAL((long)X.first, (long)actKey.first);
+EXPECT_LONGS_EQUAL(X.first, actKey.first);

-DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-LONGS_EQUAL(2, (long)actKeys.size());
-LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
-LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
+DiscreteKeys actKeys = sig.discreteKeys();
+LONGS_EQUAL(2, actKeys.size());
+LONGS_EQUAL(X.first, actKeys.front().first);
+LONGS_EQUAL(Y.first, actKeys.back().first);

vector<double> actCpt = sig.cpt();
-EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
+EXPECT_LONGS_EQUAL(6, actCpt.size());
}

/* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+TestResult tr;
+return TestRegistry::runAllTests(tr);
+}
/* ************************************************************************* */
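The table rows asserted in simple_conditional follow from normalizing each ratio of "1/1 2/3 1/4" over the values of X:

\[
\tfrac{1}{1+1} = 0.5, \qquad \tfrac{2}{2+3} = 0.4, \qquad \tfrac{1}{1+4} = 0.2,
\]

giving the rows (0.5, 0.5), (0.4, 0.6), (0.2, 0.8) compared against sig.table().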
@ -19,8 +19,10 @@
|
||||||
#include <gtsam/geometry/concepts.h>
|
#include <gtsam/geometry/concepts.h>
|
||||||
#include <gtsam/base/concepts.h>
|
#include <gtsam/base/concepts.h>
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
|
#include <iostream>
|
||||||
|
#include <limits>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
@ -36,10 +38,10 @@ Pose3::Pose3(const Pose2& pose2) :
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> H1,
|
Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> HR,
|
||||||
OptionalJacobian<6, 3> H2) {
|
OptionalJacobian<6, 3> Ht) {
|
||||||
if (H1) *H1 << I_3x3, Z_3x3;
|
if (HR) *HR << I_3x3, Z_3x3;
|
||||||
if (H2) *H2 << Z_3x3, R.transpose();
|
if (Ht) *Ht << Z_3x3, R.transpose();
|
||||||
return Pose3(R, t);
|
return Pose3(R, t);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,15 +74,15 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
||||||
OptionalJacobian<6,6> H) {
|
OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
H->setZero();
|
Hxi->setZero();
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
Vector6 dxi;
|
Vector6 dxi;
|
||||||
dxi.setZero();
|
dxi.setZero();
|
||||||
dxi(i) = 1.0;
|
dxi(i) = 1.0;
|
||||||
Matrix6 Gi = adjointMap(dxi);
|
Matrix6 Gi = adjointMap(dxi);
|
||||||
H->col(i) = Gi * y;
|
Hxi->col(i) = Gi * y;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return adjointMap(xi) * y;
|
return adjointMap(xi) * y;
|
||||||
|
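The Jacobian Hxi in adjoint() is filled column by column; since ad(xi) y is linear in xi, each column is

\[
\frac{\partial}{\partial \xi}\bigl[\operatorname{ad}(\xi)\,y\bigr]\, e_i \;=\; \operatorname{ad}(e_i)\, y,
\]

which is what the loop computes as adjointMap(dxi) * y for the basis twists dxi = e_i (and, with the transposed map, in adjointTranspose below).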
@ -88,15 +90,15 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
||||||
OptionalJacobian<6,6> H) {
|
OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
H->setZero();
|
Hxi->setZero();
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
Vector6 dxi;
|
Vector6 dxi;
|
||||||
dxi.setZero();
|
dxi.setZero();
|
||||||
dxi(i) = 1.0;
|
dxi(i) = 1.0;
|
||||||
Matrix6 GTi = adjointMap(dxi).transpose();
|
Matrix6 GTi = adjointMap(dxi).transpose();
|
||||||
H->col(i) = GTi * y;
|
Hxi->col(i) = GTi * y;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return adjointMap(xi).transpose() * y;
|
return adjointMap(xi).transpose() * y;
|
||||||
|
@ -116,8 +118,8 @@ bool Pose3::equals(const Pose3& pose, double tol) const {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
/** Modified from Murray94book version (which assumes w and v normalized?) */
|
/** Modified from Murray94book version (which assumes w and v normalized?) */
|
||||||
Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) *H = ExpmapDerivative(xi);
|
if (Hxi) *Hxi = ExpmapDerivative(xi);
|
||||||
|
|
||||||
// get angular velocity omega and translational velocity v from twist xi
|
// get angular velocity omega and translational velocity v from twist xi
|
||||||
Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));
|
Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));
|
||||||
|
@ -125,8 +127,8 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
||||||
Rot3 R = Rot3::Expmap(omega);
|
Rot3 R = Rot3::Expmap(omega);
|
||||||
double theta2 = omega.dot(omega);
|
double theta2 = omega.dot(omega);
|
||||||
if (theta2 > std::numeric_limits<double>::epsilon()) {
|
if (theta2 > std::numeric_limits<double>::epsilon()) {
|
||||||
Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis
|
Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis
|
||||||
Vector3 omega_cross_v = omega.cross(v); // points towards axis
|
Vector3 omega_cross_v = omega.cross(v); // points towards axis
|
||||||
Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2;
|
Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2;
|
||||||
return Pose3(R, t);
|
return Pose3(R, t);
|
||||||
} else {
|
} else {
|
||||||
|
@ -135,10 +137,10 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
|
Vector6 Pose3::Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose) {
|
||||||
if (H) *H = LogmapDerivative(p);
|
if (Hpose) *Hpose = LogmapDerivative(pose);
|
||||||
const Vector3 w = Rot3::Logmap(p.rotation());
|
const Vector3 w = Rot3::Logmap(pose.rotation());
|
||||||
const Vector3 T = p.translation();
|
const Vector3 T = pose.translation();
|
||||||
const double t = w.norm();
|
const double t = w.norm();
|
||||||
if (t < 1e-10) {
|
if (t < 1e-10) {
|
||||||
Vector6 log;
|
Vector6 log;
|
||||||
|
@ -158,33 +160,33 @@ Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian H) {
|
Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian Hxi) {
|
||||||
#ifdef GTSAM_POSE3_EXPMAP
|
#ifdef GTSAM_POSE3_EXPMAP
|
||||||
return Expmap(xi, H);
|
return Expmap(xi, Hxi);
|
||||||
#else
|
#else
|
||||||
Matrix3 DR;
|
Matrix3 DR;
|
||||||
Rot3 R = Rot3::Retract(xi.head<3>(), H ? &DR : 0);
|
Rot3 R = Rot3::Retract(xi.head<3>(), Hxi ? &DR : 0);
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
*H = I_6x6;
|
*Hxi = I_6x6;
|
||||||
H->topLeftCorner<3,3>() = DR;
|
Hxi->topLeftCorner<3, 3>() = DR;
|
||||||
}
|
}
|
||||||
return Pose3(R, Point3(xi.tail<3>()));
|
return Pose3(R, Point3(xi.tail<3>()));
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
|
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& pose, ChartJacobian Hpose) {
|
||||||
#ifdef GTSAM_POSE3_EXPMAP
|
#ifdef GTSAM_POSE3_EXPMAP
|
||||||
return Logmap(T, H);
|
return Logmap(pose, Hpose);
|
||||||
#else
|
#else
|
||||||
Matrix3 DR;
|
Matrix3 DR;
|
||||||
Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
|
Vector3 omega = Rot3::LocalCoordinates(pose.rotation(), Hpose ? &DR : 0);
|
||||||
if (H) {
|
if (Hpose) {
|
||||||
*H = I_6x6;
|
*Hpose = I_6x6;
|
||||||
H->topLeftCorner<3,3>() = DR;
|
Hpose->topLeftCorner<3, 3>() = DR;
|
||||||
}
|
}
|
||||||
Vector6 xi;
|
Vector6 xi;
|
||||||
xi << omega, T.translation();
|
xi << omega, pose.translation();
|
||||||
return xi;
|
return xi;
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
@ -261,16 +263,16 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
const Point3& Pose3::translation(OptionalJacobian<3, 6> H) const {
|
const Point3& Pose3::translation(OptionalJacobian<3, 6> Hself) const {
|
||||||
if (H) *H << Z_3x3, rotation().matrix();
|
if (Hself) *Hself << Z_3x3, rotation().matrix();
|
||||||
return t_;
|
return t_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
const Rot3& Pose3::rotation(OptionalJacobian<3, 6> H) const {
|
const Rot3& Pose3::rotation(OptionalJacobian<3, 6> Hself) const {
|
||||||
if (H) {
|
if (Hself) {
|
||||||
*H << I_3x3, Z_3x3;
|
*Hself << I_3x3, Z_3x3;
|
||||||
}
|
}
|
||||||
return R_;
|
return R_;
|
||||||
}
|
}
|
||||||
|
@ -284,9 +286,10 @@ Matrix4 Pose3::matrix() const {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::transformPoseFrom(const Pose3& aTb) const {
|
Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself,
|
||||||
|
OptionalJacobian<6, 6> HaTb) const {
|
||||||
const Pose3& wTa = *this;
|
const Pose3& wTa = *this;
|
||||||
return wTa * aTb;
|
return wTa.compose(aTb, Hself, HaTb);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -299,101 +302,101 @@ Pose3 Pose3::transform_to(const Pose3& pose) const {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> H1,
|
Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself,
|
||||||
OptionalJacobian<6, 6> H2) const {
|
OptionalJacobian<6, 6> HwTb) const {
|
||||||
if (H1) *H1 = -wTb.inverse().AdjointMap() * AdjointMap();
|
if (Hself) *Hself = -wTb.inverse().AdjointMap() * AdjointMap();
|
||||||
if (H2) *H2 = I_6x6;
|
if (HwTb) *HwTb = I_6x6;
|
||||||
const Pose3& wTa = *this;
|
const Pose3& wTa = *this;
|
||||||
return wTa.inverse() * wTb;
|
return wTa.inverse() * wTb;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Point3 Pose3::transformFrom(const Point3& p, OptionalJacobian<3,6> Dpose,
|
Point3 Pose3::transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself,
|
||||||
OptionalJacobian<3,3> Dpoint) const {
|
OptionalJacobian<3, 3> Hpoint) const {
|
||||||
// Only get matrix once, to avoid multiple allocations,
|
// Only get matrix once, to avoid multiple allocations,
|
||||||
// as well as multiple conversions in the Quaternion case
|
// as well as multiple conversions in the Quaternion case
|
||||||
const Matrix3 R = R_.matrix();
|
const Matrix3 R = R_.matrix();
|
||||||
if (Dpose) {
|
if (Hself) {
|
||||||
Dpose->leftCols<3>() = R * skewSymmetric(-p.x(), -p.y(), -p.z());
|
Hself->leftCols<3>() = R * skewSymmetric(-point.x(), -point.y(), -point.z());
|
||||||
Dpose->rightCols<3>() = R;
|
Hself->rightCols<3>() = R;
|
||||||
}
|
}
|
||||||
if (Dpoint) {
|
if (Hpoint) {
|
||||||
*Dpoint = R;
|
*Hpoint = R;
|
||||||
}
|
}
|
||||||
return R_ * p + t_;
|
return R_ * point + t_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Point3 Pose3::transformTo(const Point3& p, OptionalJacobian<3,6> Dpose,
|
Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself,
|
||||||
OptionalJacobian<3,3> Dpoint) const {
|
OptionalJacobian<3, 3> Hpoint) const {
|
||||||
// Only get transpose once, to avoid multiple allocations,
|
// Only get transpose once, to avoid multiple allocations,
|
||||||
// as well as multiple conversions in the Quaternion case
|
// as well as multiple conversions in the Quaternion case
|
||||||
const Matrix3 Rt = R_.transpose();
|
const Matrix3 Rt = R_.transpose();
|
||||||
const Point3 q(Rt*(p - t_));
|
const Point3 q(Rt*(point - t_));
|
||||||
if (Dpose) {
|
if (Hself) {
|
||||||
const double wx = q.x(), wy = q.y(), wz = q.z();
|
const double wx = q.x(), wy = q.y(), wz = q.z();
|
||||||
(*Dpose) <<
|
(*Hself) <<
|
||||||
0.0, -wz, +wy,-1.0, 0.0, 0.0,
|
0.0, -wz, +wy,-1.0, 0.0, 0.0,
|
||||||
+wz, 0.0, -wx, 0.0,-1.0, 0.0,
|
+wz, 0.0, -wx, 0.0,-1.0, 0.0,
|
||||||
-wy, +wx, 0.0, 0.0, 0.0,-1.0;
|
-wy, +wx, 0.0, 0.0, 0.0,-1.0;
|
||||||
}
|
}
|
||||||
if (Dpoint) {
|
if (Hpoint) {
|
||||||
*Dpoint = Rt;
|
*Hpoint = Rt;
|
||||||
}
|
}
|
||||||
return q;
|
return q;
|
||||||
}
|
}
|
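With q = R^T (p - t), the 3x6 block written into Hself above reads, in the (rotation, translation) ordering of the twist used here,

\[
\frac{\partial q}{\partial \xi} \;=\; \bigl[\,[q]_\times \;\; -I_3\,\bigr],
\]

and Hpoint is simply R^T, matching the matrix literal above.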
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
|
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> Hself,
|
||||||
OptionalJacobian<1, 3> H2) const {
|
OptionalJacobian<1, 3> Hpoint) const {
|
||||||
Matrix36 D_local_pose;
|
Matrix36 D_local_pose;
|
||||||
Matrix3 D_local_point;
|
Matrix3 D_local_point;
|
||||||
Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
|
Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
|
||||||
if (!H1 && !H2) {
|
if (!Hself && !Hpoint) {
|
||||||
return local.norm();
|
return local.norm();
|
||||||
} else {
|
} else {
|
||||||
Matrix13 D_r_local;
|
Matrix13 D_r_local;
|
||||||
const double r = norm3(local, D_r_local);
|
const double r = norm3(local, D_r_local);
|
||||||
if (H1) *H1 = D_r_local * D_local_pose;
|
if (Hself) *Hself = D_r_local * D_local_pose;
|
||||||
if (H2) *H2 = D_r_local * D_local_point;
|
if (Hpoint) *Hpoint = D_r_local * D_local_point;
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
|
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> Hself,
|
||||||
OptionalJacobian<1, 6> H2) const {
|
OptionalJacobian<1, 6> Hpose) const {
|
||||||
Matrix13 D_local_point;
|
Matrix13 D_local_point;
|
||||||
double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
|
double r = range(pose.translation(), Hself, Hpose ? &D_local_point : 0);
|
||||||
if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
|
if (Hpose) *Hpose << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> H1,
|
Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> Hself,
|
||||||
OptionalJacobian<2, 3> H2) const {
|
OptionalJacobian<2, 3> Hpoint) const {
|
||||||
Matrix36 D_local_pose;
|
Matrix36 D_local_pose;
|
||||||
Matrix3 D_local_point;
|
Matrix3 D_local_point;
|
||||||
Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
|
Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
|
||||||
if (!H1 && !H2) {
|
if (!Hself && !Hpoint) {
|
||||||
return Unit3(local);
|
return Unit3(local);
|
||||||
} else {
|
} else {
|
||||||
Matrix23 D_b_local;
|
Matrix23 D_b_local;
|
||||||
Unit3 b = Unit3::FromPoint3(local, D_b_local);
|
Unit3 b = Unit3::FromPoint3(local, D_b_local);
|
||||||
if (H1) *H1 = D_b_local * D_local_pose;
|
if (Hself) *Hself = D_b_local * D_local_pose;
|
||||||
if (H2) *H2 = D_b_local * D_local_point;
|
if (Hpoint) *Hpoint = D_b_local * D_local_point;
|
||||||
return b;
|
return b;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> H1,
|
Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself,
|
||||||
OptionalJacobian<2, 6> H2) const {
|
OptionalJacobian<2, 6> Hpose) const {
|
||||||
if (H2) {
|
if (Hpose) {
|
||||||
H2->setZero();
|
Hpose->setZero();
|
||||||
return bearing(pose.translation(), H1, H2.cols<3>(3));
|
return bearing(pose.translation(), Hself, Hpose.cols<3>(3));
|
||||||
}
|
}
|
||||||
return bearing(pose.translation(), H1, boost::none);
|
return bearing(pose.translation(), Hself, boost::none);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
|
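A sketch of the identity behind the Expmap translation above (a derivation from the code, not stated in the patch; theta^2 = omega^T omega):

\[
t \;=\; \frac{(I-R)(\omega\times v) + \omega\,(\omega^{\top}v)}{\theta^{2}}
\;=\; \Bigl(I + \tfrac{1-\cos\theta}{\theta^{2}}[\omega]_\times
      + \tfrac{\theta-\sin\theta}{\theta^{3}}[\omega]_\times^{2}\Bigr)\, v,
\]

i.e. the SO(3) left Jacobian applied to v, with the small-angle case handled separately in the else branch.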
@ -75,8 +75,8 @@ public:
|
||||||
|
|
||||||
/// Named constructor with derivatives
|
/// Named constructor with derivatives
|
||||||
static Pose3 Create(const Rot3& R, const Point3& t,
|
static Pose3 Create(const Rot3& R, const Point3& t,
|
||||||
OptionalJacobian<6, 3> H1 = boost::none,
|
OptionalJacobian<6, 3> HR = boost::none,
|
||||||
OptionalJacobian<6, 3> H2 = boost::none);
|
OptionalJacobian<6, 3> Ht = boost::none);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create Pose3 by aligning two point pairs
|
* Create Pose3 by aligning two point pairs
|
||||||
|
@ -117,10 +117,10 @@ public:
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
/// Exponential map at identity - create a rotation from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$
|
/// Exponential map at identity - create a rotation from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$
|
||||||
static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> H = boost::none);
|
static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi = boost::none);
|
||||||
|
|
||||||
/// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this rotation
|
/// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this rotation
|
||||||
static Vector6 Logmap(const Pose3& p, OptionalJacobian<6, 6> H = boost::none);
|
static Vector6 Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose = boost::none);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate Adjoint map, transforming a twist in the this pose's (i.e, body) frame to the world spatial frame
|
* Calculate Adjoint map, transforming a twist in the this pose's (i.e, body) frame to the world spatial frame
|
||||||
|
@ -157,7 +157,7 @@ public:
    * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
    */
   static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
-                         OptionalJacobian<6, 6> = boost::none);
+                         OptionalJacobian<6, 6> Hxi = boost::none);

   // temporary fix for wrappers until case issue is resolved
   static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}

@ -167,7 +167,7 @@ public:
    * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
    */
   static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
-                                  OptionalJacobian<6, 6> H = boost::none);
+                                  OptionalJacobian<6, 6> Hxi = boost::none);

   /// Derivative of Expmap
   static Matrix6 ExpmapDerivative(const Vector6& xi);

@ -177,8 +177,8 @@ public:

   // Chart at origin, depends on compile-time flag GTSAM_POSE3_EXPMAP
   struct ChartAtOrigin {
-    static Pose3 Retract(const Vector6& v, ChartJacobian H = boost::none);
-    static Vector6 Local(const Pose3& r, ChartJacobian H = boost::none);
+    static Pose3 Retract(const Vector6& xi, ChartJacobian Hxi = boost::none);
+    static Vector6 Local(const Pose3& pose, ChartJacobian Hpose = boost::none);
   };

   using LieGroup<Pose3, 6>::inverse; // version with derivative
@ -201,38 +201,38 @@ public:

   /**
    * @brief takes point in Pose coordinates and transforms it to world coordinates
-   * @param p point in Pose coordinates
-   * @param Dpose optional 3*6 Jacobian wrpt this pose
-   * @param Dpoint optional 3*3 Jacobian wrpt point
+   * @param point point in Pose coordinates
+   * @param Hself optional 3*6 Jacobian wrpt this pose
+   * @param Hpoint optional 3*3 Jacobian wrpt point
    * @return point in world coordinates
    */
-  Point3 transformFrom(const Point3& p, OptionalJacobian<3, 6> Dpose =
-      boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
+  Point3 transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself =
+      boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;

   /** syntactic sugar for transformFrom */
-  inline Point3 operator*(const Point3& p) const {
-    return transformFrom(p);
+  inline Point3 operator*(const Point3& point) const {
+    return transformFrom(point);
   }

   /**
    * @brief takes point in world coordinates and transforms it to Pose coordinates
-   * @param p point in world coordinates
-   * @param Dpose optional 3*6 Jacobian wrpt this pose
-   * @param Dpoint optional 3*3 Jacobian wrpt point
+   * @param point point in world coordinates
+   * @param Hself optional 3*6 Jacobian wrpt this pose
+   * @param Hpoint optional 3*3 Jacobian wrpt point
    * @return point in Pose coordinates
    */
-  Point3 transformTo(const Point3& p, OptionalJacobian<3, 6> Dpose =
-      boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
+  Point3 transformTo(const Point3& point, OptionalJacobian<3, 6> Hself =
+      boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;

   /// @}
   /// @name Standard Interface
   /// @{

   /// get rotation
-  const Rot3& rotation(OptionalJacobian<3, 6> H = boost::none) const;
+  const Rot3& rotation(OptionalJacobian<3, 6> Hself = boost::none) const;

   /// get translation
-  const Point3& translation(OptionalJacobian<3, 6> H = boost::none) const;
+  const Point3& translation(OptionalJacobian<3, 6> Hself = boost::none) const;

   /// get x
   double x() const {
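For the point-transform declarations above, a hedged sketch of how the renamed Hself/Hpoint outputs are requested (illustration only; the helper function name is made up):

#include <gtsam/geometry/Pose3.h>
using namespace gtsam;

void pointTransformJacobians(const Pose3& wTb, const Point3& p_body) {
  Matrix36 Hself;    // 3x6 Jacobian w.r.t. the pose (formerly Dpose)
  Matrix3  Hpoint;   // 3x3 Jacobian w.r.t. the point (formerly Dpoint)
  Point3 p_world = wTb.transformFrom(p_body, Hself, Hpoint);
  Point3 p_back  = wTb.transformTo(p_world);   // inverse mapping, back to the body frame
  // p_back matches p_body.
}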
@ -252,36 +252,44 @@ public:
   /** convert to 4*4 matrix */
   Matrix4 matrix() const;

-  /** receives a pose in local coordinates and transforms it to world coordinates */
-  Pose3 transformPoseFrom(const Pose3& pose) const;
+  /**
+   * Assuming self == wTa, takes a pose aTb in local coordinates
+   * and transforms it to world coordinates wTb = wTa * aTb.
+   * This is identical to compose.
+   */
+  Pose3 transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself = boost::none,
+                          OptionalJacobian<6, 6> HaTb = boost::none) const;

-  /** receives a pose in world coordinates and transforms it to local coordinates */
-  Pose3 transformPoseTo(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none,
-      OptionalJacobian<6, 6> H2 = boost::none) const;
+  /**
+   * Assuming self == wTa, takes a pose wTb in world coordinates
+   * and transforms it to local coordinates aTb = inv(wTa) * wTb
+   */
+  Pose3 transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself = boost::none,
+                        OptionalJacobian<6, 6> HwTb = boost::none) const;

   /**
    * Calculate range to a landmark
    * @param point 3D location of landmark
    * @return range (double)
    */
-  double range(const Point3& point, OptionalJacobian<1, 6> H1 = boost::none,
-      OptionalJacobian<1, 3> H2 = boost::none) const;
+  double range(const Point3& point, OptionalJacobian<1, 6> Hself = boost::none,
+      OptionalJacobian<1, 3> Hpoint = boost::none) const;

   /**
    * Calculate range to another pose
    * @param pose Other SO(3) pose
    * @return range (double)
    */
-  double range(const Pose3& pose, OptionalJacobian<1, 6> H1 = boost::none,
-      OptionalJacobian<1, 6> H2 = boost::none) const;
+  double range(const Pose3& pose, OptionalJacobian<1, 6> Hself = boost::none,
+      OptionalJacobian<1, 6> Hpose = boost::none) const;

   /**
    * Calculate bearing to a landmark
    * @param point 3D location of landmark
    * @return bearing (Unit3)
    */
-  Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> H1 = boost::none,
-      OptionalJacobian<2, 3> H2 = boost::none) const;
+  Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> Hself = boost::none,
+      OptionalJacobian<2, 3> Hpoint = boost::none) const;

   /**
    * Calculate bearing to another pose
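The new documentation states the identities wTb = wTa * aTb and aTb = inv(wTa) * wTb; a small sketch (function name hypothetical) makes them concrete:

#include <gtsam/geometry/Pose3.h>
using namespace gtsam;

void poseTransformIdentities(const Pose3& wTa, const Pose3& aTb) {
  Pose3 wTb = wTa.transformPoseFrom(aTb);     // identical to wTa.compose(aTb), i.e. wTa * aTb
  Pose3 aTb_back = wTa.transformPoseTo(wTb);  // identical to wTa.inverse() * wTb
  // aTb_back matches aTb.
}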
@ -289,8 +297,8 @@ public:
    * information is ignored.
    * @return bearing (Unit3)
    */
-  Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> H1 = boost::none,
-      OptionalJacobian<2, 6> H2 = boost::none) const;
+  Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself = boost::none,
+      OptionalJacobian<2, 6> Hpose = boost::none) const;

   /// @}
   /// @name Advanced Interface

@ -321,20 +329,20 @@ public:
 #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
   /// @name Deprecated
   /// @{
-  Point3 transform_from(const Point3& p,
-      OptionalJacobian<3, 6> Dpose = boost::none,
-      OptionalJacobian<3, 3> Dpoint = boost::none) const {
-    return transformFrom(p, Dpose, Dpoint);
+  Point3 transform_from(const Point3& point,
+      OptionalJacobian<3, 6> Hself = boost::none,
+      OptionalJacobian<3, 3> Hpoint = boost::none) const {
+    return transformFrom(point, Hself, Hpoint);
   }
-  Point3 transform_to(const Point3& p,
-      OptionalJacobian<3, 6> Dpose = boost::none,
-      OptionalJacobian<3, 3> Dpoint = boost::none) const {
-    return transformTo(p, Dpose, Dpoint);
+  Point3 transform_to(const Point3& point,
+      OptionalJacobian<3, 6> Hself = boost::none,
+      OptionalJacobian<3, 3> Hpoint = boost::none) const {
+    return transformTo(point, Hself, Hpoint);
   }
   Pose3 transform_pose_to(const Pose3& pose,
-      OptionalJacobian<6, 6> H1 = boost::none,
-      OptionalJacobian<6, 6> H2 = boost::none) const {
-    return transformPoseTo(pose, H1, H2);
+      OptionalJacobian<6, 6> Hself = boost::none,
+      OptionalJacobian<6, 6> Hpose = boost::none) const {
+    return transformPoseTo(pose, Hself, Hpose);
   }
   /**
    * @deprecated: this function is neither here not there. */

@ -340,7 +340,7 @@ void serialize(
     const unsigned int file_version
 ) {
   Matrix& M = Q.matrix_;
-  ar& M;
+  ar& BOOST_SERIALIZATION_NVP(M);
 }

 /*

@ -418,6 +418,29 @@ TEST(Pose3, transform_to_rotate) {
   EXPECT(assert_equal(expected, actual, 0.001));
 }

+/* ************************************************************************* */
+// Check transformPoseFrom and its pushforward
+Pose3 transformPoseFrom_(const Pose3& wTa, const Pose3& aTb) {
+  return wTa.transformPoseFrom(aTb);
+}
+
+TEST(Pose3, transformPoseFrom)
+{
+  Matrix actual = (T2*T2).matrix();
+  Matrix expected = T2.matrix()*T2.matrix();
+  EXPECT(assert_equal(actual, expected, 1e-8));
+
+  Matrix H1, H2;
+  T2.transformPoseFrom(T2, H1, H2);
+
+  Matrix numericalH1 = numericalDerivative21(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH1, H1, 5e-3));
+  EXPECT(assert_equal(T2.inverse().AdjointMap(), H1, 5e-3));
+
+  Matrix numericalH2 = numericalDerivative22(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH2, H2, 1e-4));
+}
+
 /* ************************************************************************* */
 TEST(Pose3, transformTo) {
   Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(2, 4, 0));

@ -69,4 +69,6 @@ namespace gtsam {
     void saveGraph(const std::string &s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
   };

 }
+
+#include <gtsam/inference/BayesNet-inst.h>
@ -136,57 +136,61 @@ namespace gtsam {
   }
 }

-/* ************************************************************************* */
+/* *********************************************************************** */
 // separator marginal, uses separator marginal of parent recursively
 // P(C) = P(F|S) P(S)
-/* ************************************************************************* */
-template<class DERIVED, class FACTORGRAPH>
+/* *********************************************************************** */
+template <class DERIVED, class FACTORGRAPH>
 typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(Eliminate function) const
-{
+BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(
+    Eliminate function) const {
   gttic(BayesTreeCliqueBase_separatorMarginal);
   // Check if the Separator marginal was already calculated
-  if (!cachedSeparatorMarginal_)
-  {
+  if (!cachedSeparatorMarginal_) {
     gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

     // If this is the root, there is no separator
-    if (parent_.expired() /*(if we're the root)*/)
-    {
+    if (parent_.expired() /*(if we're the root)*/) {
       // we are root, return empty
       FactorGraphType empty;
       cachedSeparatorMarginal_ = empty;
-    }
-    else
-    {
+    } else {
+      // Flatten recursion in timing outline
+      gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss);
+      gttoc(BayesTreeCliqueBase_separatorMarginal);

       // Obtain P(S) = \int P(Cp) = \int P(Fp|Sp) P(Sp)
       // initialize P(Cp) with the parent separator marginal
       derived_ptr parent(parent_.lock());
-      gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss); // Flatten recursion in timing outline
-      gttoc(BayesTreeCliqueBase_separatorMarginal);
-      FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp)
+      FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp)
       gttic(BayesTreeCliqueBase_separatorMarginal);
       gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

       // now add the parent conditional
       p_Cp += parent->conditional_; // P(Fp|Sp)

       // The variables we want to keepSet are exactly the ones in S
-      KeyVector indicesS(this->conditional()->beginParents(), this->conditional()->endParents());
-      cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+      KeyVector indicesS(this->conditional()->beginParents(),
+                         this->conditional()->endParents());
+      auto separatorMarginal =
+          p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+      cachedSeparatorMarginal_.reset(*separatorMarginal);
     }
   }

   // return the shortcut P(S||B)
   return *cachedSeparatorMarginal_; // return the cached version
 }

-/* ************************************************************************* */
-// marginal2, uses separator marginal of parent recursively
+/* *********************************************************************** */
+// marginal2, uses separator marginal of parent
 // P(C) = P(F|S) P(S)
-/* ************************************************************************* */
-template<class DERIVED, class FACTORGRAPH>
+/* *********************************************************************** */
+template <class DERIVED, class FACTORGRAPH>
 typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(Eliminate function) const
-{
+BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(
+    Eliminate function) const {
   gttic(BayesTreeCliqueBase_marginal2);
   // initialize with separator marginal P(S)
   FactorGraphType p_C = this->separatorMarginal(function);
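Restating the recursion from the comments above (a summary of what the code caches, no new math):

P(S) \;=\; \int P(C_p) \;=\; \int P(F_p \mid S_p)\, P(S_p), \qquad P(C) \;=\; P(F \mid S)\, P(S).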
@ -65,6 +65,8 @@ namespace gtsam {
     Conditional(size_t nrFrontals) : nrFrontals_(nrFrontals) {}

     /// @}

+   public:
     /// @name Testable
     /// @{

@ -76,7 +78,6 @@ namespace gtsam {

     /// @}

-   public:
     /// @name Standard Interface
     /// @{

@ -515,7 +515,7 @@ Vector JacobianFactor::error_vector(const VectorValues& c) const {
 double JacobianFactor::error(const VectorValues& c) const {
   Vector e = unweighted_error(c);
   // Use the noise model distance function to get the correct error if available.
-  if (model_) return 0.5 * model_->distance(e);
+  if (model_) return 0.5 * model_->squaredMahalanobisDistance(e);
   return 0.5 * e.dot(e);
 }
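The replaced call means a factor's error is now half the squared Mahalanobis distance of its unweighted residual whenever a noise model is attached. A hedged sketch of the same computation done by hand (the sigma value is arbitrary):

#include <gtsam/linear/NoiseModel.h>
using namespace gtsam;

// error = 0.5 * || e / sigma ||^2 = 0.5 * e' * Sigma^{-1} * e for an isotropic model.
double halfSquaredMahalanobis(const Vector& e) {
  auto model = noiseModel::Isotropic::Sigma(e.size(), 0.5);
  return 0.5 * model->squaredMahalanobisDistance(e);
}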
@ -137,12 +137,12 @@ Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
   }
 }

-double Fair::weight(double error) const {
-  return 1.0 / (1.0 + std::abs(error) / c_);
+double Fair::weight(double distance) const {
+  return 1.0 / (1.0 + std::abs(distance) / c_);
 }

-double Fair::residual(double error) const {
-  const double absError = std::abs(error);
+double Fair::loss(double distance) const {
+  const double absError = std::abs(distance);
   const double normalizedError = absError / c_;
   const double c_2 = c_ * c_;
   return c_2 * (normalizedError - std::log1p(normalizedError));

@ -170,15 +170,15 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
   }
 }

-double Huber::weight(double error) const {
-  const double absError = std::abs(error);
+double Huber::weight(double distance) const {
+  const double absError = std::abs(distance);
   return (absError <= k_) ? (1.0) : (k_ / absError);
 }

-double Huber::residual(double error) const {
-  const double absError = std::abs(error);
+double Huber::loss(double distance) const {
+  const double absError = std::abs(distance);
   if (absError <= k_) { // |x| <= k
-    return error*error / 2;
+    return distance*distance / 2;
   } else { // |x| > k
     return k_ * (absError - (k_/2));
   }
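These weight functions are what iteratively re-weighted least squares consumes. A hedged sketch of the typical use, scaling a whitened row by the square root of the weight (the header path and helper name are assumptions):

#include <gtsam/linear/NoiseModel.h>
using namespace gtsam;

void reweightRow(Vector& whitenedRow, double whitenedResidualNorm) {
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  // Block reweighting: scale the whole whitened row by sqrt(weight(d)).
  whitenedRow *= huber->sqrtWeight(whitenedResidualNorm);
}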
@ -208,12 +208,12 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
   }
 }

-double Cauchy::weight(double error) const {
-  return ksquared_ / (ksquared_ + error*error);
+double Cauchy::weight(double distance) const {
+  return ksquared_ / (ksquared_ + distance*distance);
 }

-double Cauchy::residual(double error) const {
-  const double val = std::log1p(error * error / ksquared_);
+double Cauchy::loss(double distance) const {
+  const double val = std::log1p(distance * distance / ksquared_);
   return ksquared_ * val * 0.5;
 }

@ -241,18 +241,18 @@ Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), c
   }
 }

-double Tukey::weight(double error) const {
-  if (std::abs(error) <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+double Tukey::weight(double distance) const {
+  if (std::abs(distance) <= c_) {
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     return one_minus_xc2 * one_minus_xc2;
   }
   return 0.0;
 }

-double Tukey::residual(double error) const {
-  double absError = std::abs(error);
+double Tukey::loss(double distance) const {
+  double absError = std::abs(distance);
   if (absError <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2;
     return csquared_ * (1 - t) / 6.0;
   } else {

@ -280,13 +280,13 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {

 Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}

-double Welsch::weight(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::weight(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return std::exp(-xc2);
 }

-double Welsch::residual(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::loss(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return csquared_ * 0.5 * -std::expm1(-xc2);
 }

@ -311,16 +311,16 @@ GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }

-double GemanMcClure::weight(double error) const {
+double GemanMcClure::weight(double distance) const {
   const double c2 = c_*c_;
   const double c4 = c2*c2;
-  const double c2error = c2 + error*error;
+  const double c2error = c2 + distance*distance;
   return c4/(c2error*c2error);
 }

-double GemanMcClure::residual(double error) const {
+double GemanMcClure::loss(double distance) const {
   const double c2 = c_*c_;
-  const double error2 = error*error;
+  const double error2 = distance*distance;
   return 0.5 * (c2 * error2) / (c2 + error2);
 }

@ -345,8 +345,8 @@ DCS::DCS(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }

-double DCS::weight(double error) const {
-  const double e2 = error*error;
+double DCS::weight(double distance) const {
+  const double e2 = distance*distance;
   if (e2 > c_)
   {
     const double w = 2.0*c_/(c_ + e2);

@ -356,10 +356,10 @@ double DCS::weight(double error) const {
   return 1.0;
 }

-double DCS::residual(double error) const {
+double DCS::loss(double distance) const {
   // This is the simplified version of Eq 9 from (Agarwal13icra)
   // after you simplify and cancel terms.
-  const double e2 = error*error;
+  const double e2 = distance*distance;
   const double e4 = e2*e2;
   const double c2 = c_*c_;

@ -391,17 +391,17 @@ L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
   }
 }

-double L2WithDeadZone::weight(double error) const {
+double L2WithDeadZone::weight(double distance) const {
   // note that this code is slightly uglier than residual, because there are three distinct
   // cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
   // cases (deadzone, non-deadzone) in residual.
-  if (std::abs(error) <= k_) return 0.0;
-  else if (error > k_) return (-k_+error)/error;
-  else return (k_+error)/error;
+  if (std::abs(distance) <= k_) return 0.0;
+  else if (distance > k_) return (-k_+distance)/distance;
+  else return (k_+distance)/distance;
 }

-double L2WithDeadZone::residual(double error) const {
-  const double abs_error = std::abs(error);
+double L2WithDeadZone::loss(double distance) const {
+  const double abs_error = std::abs(distance);
   return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
 }
@ -36,12 +36,12 @@ namespace noiseModel {
  * The mEstimator name space contains all robust error functions.
  * It mirrors the exposition at
  * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
- * which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
+ * which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice.
  *
  * To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
  *
  * Name        Symbol          Least-Squares   L1-norm    Huber
- * Residual    \rho(x)         0.5*x^2         |x|        0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
+ * Loss        \rho(x)         0.5*x^2         |x|        0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
  * Derivative  \phi(x)         x               sgn(x)     x if |x|<k, k sgn(x) otherwise
  * Weight      w(x)=\phi(x)/x  1               1/|x|      1 if |x|<k, k/|x| otherwise
  *
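To make the Huber column of the table concrete, a small hedged example (header path assumed; the two values are chosen to land inside and outside the quadratic region):

#include <gtsam/linear/NoiseModel.h>
#include <iostream>
using namespace gtsam::noiseModel::mEstimator;

int main() {
  auto huber = Huber::Create(1.345);
  for (double x : {0.5, 3.0}) {
    std::cout << "x=" << x
              << " loss=" << huber->loss(x)       // 0.5*x^2 below k, k*(|x| - k/2) above
              << " weight=" << huber->weight(x)   // 1 below k, k/|x| above
              << "\n";
  }
  return 0;
}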
@ -75,27 +75,31 @@ class GTSAM_EXPORT Base {
    * the quadratic function for an L2 penalty, the absolute value function for
    * an L1 penalty, etc.
    *
-   * TODO(mikebosse): When the residual function has as input the norm of the
-   * residual vector, then it prevents implementations of asymmeric loss
+   * TODO(mikebosse): When the loss function has as input the norm of the
+   * error vector, then it prevents implementations of asymmeric loss
    * functions. It would be better for this function to accept the vector and
    * internally call the norm if necessary.
    */
-  virtual double residual(double error) const { return 0; };
+  virtual double loss(double distance) const { return 0; };
+
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
+  virtual double residual(double distance) const { return loss(distance); };
+#endif

   /*
    * This method is responsible for returning the weight function for a given
    * amount of error. The weight function is related to the analytic derivative
-   * of the residual function. See
+   * of the loss function. See
    * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
    * for details. This method is required when optimizing cost functions with
    * robust penalties using iteratively re-weighted least squares.
    */
-  virtual double weight(double error) const = 0;
+  virtual double weight(double distance) const = 0;

   virtual void print(const std::string &s) const = 0;
   virtual bool equals(const Base &expected, double tol = 1e-8) const = 0;

-  double sqrtWeight(double error) const { return std::sqrt(weight(error)); }
+  double sqrtWeight(double distance) const { return std::sqrt(weight(distance)); }

   /** produce a weight vector according to an error vector and the implemented
    * robust function */
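Given the virtual loss/weight pair above, a custom robust kernel plugs in by overriding both. The class below is a made-up sketch of an L1-like kernel, not something in the library, and assumes the m-estimator Base is default-constructible as shown in the diff:

#include <gtsam/linear/NoiseModel.h>
#include <cmath>

// Hypothetical kernel: loss(d) = c*|d|, weight(d) = c/|d| (guarded at zero).
class SoftL1 : public gtsam::noiseModel::mEstimator::Base {
  double c_;
 public:
  explicit SoftL1(double c = 1.0) : c_(c) {}
  double loss(double distance) const override { return c_ * std::abs(distance); }
  double weight(double distance) const override {
    const double a = std::abs(distance);
    return a < 1e-9 ? 1.0 : c_ / a;   // guard the division at zero
  }
  void print(const std::string& /*s*/) const override {}
  bool equals(const Base& /*expected*/, double /*tol*/) const override { return false; }
};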
@ -123,7 +127,7 @@ class GTSAM_EXPORT Base {
   }
 };

-/// Null class is not robust so is a Gaussian ?
+/// Null class should behave as Gaussian
 class GTSAM_EXPORT Null : public Base {
  public:
   typedef boost::shared_ptr<Null> shared_ptr;

@ -131,7 +135,7 @@ class GTSAM_EXPORT Null : public Base {
   Null(const ReweightScheme reweight = Block) : Base(reweight) {}
   ~Null() {}
   double weight(double /*error*/) const { return 1.0; }
-  double residual(double error) const { return error; }
+  double loss(double distance) const { return 0.5 * distance * distance; }
   void print(const std::string &s) const;
   bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
   static shared_ptr Create();

@ -154,8 +158,8 @@ class GTSAM_EXPORT Fair : public Base {
   typedef boost::shared_ptr<Fair> shared_ptr;

   Fair(double c = 1.3998, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double c, const ReweightScheme reweight = Block);

@ -179,8 +183,8 @@ class GTSAM_EXPORT Huber : public Base {
   typedef boost::shared_ptr<Huber> shared_ptr;

   Huber(double k = 1.345, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -209,8 +213,8 @@ class GTSAM_EXPORT Cauchy : public Base {
   typedef boost::shared_ptr<Cauchy> shared_ptr;

   Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -234,8 +238,8 @@ class GTSAM_EXPORT Tukey : public Base {
   typedef boost::shared_ptr<Tukey> shared_ptr;

   Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -259,8 +263,8 @@ class GTSAM_EXPORT Welsch : public Base {
   typedef boost::shared_ptr<Welsch> shared_ptr;

   Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -295,8 +299,8 @@ class GTSAM_EXPORT GemanMcClure : public Base {

   GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
   ~GemanMcClure() {}
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -325,8 +329,8 @@ class GTSAM_EXPORT DCS : public Base {

   DCS(double c = 1.0, const ReweightScheme reweight = Block);
   ~DCS() {}
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@ -358,8 +362,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
   typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;

   L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -74,6 +74,13 @@ Vector Base::sigmas() const {
   throw("Base::sigmas: sigmas() not implemented for this noise model");
 }

+/* ************************************************************************* */
+double Base::squaredMahalanobisDistance(const Vector& v) const {
+  // Note: for Diagonal, which does ediv_, will be correct for constraints
+  Vector w = whiten(v);
+  return w.dot(w);
+}
+
 /* ************************************************************************* */
 Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
   size_t m = R.rows(), n = R.cols();

@ -164,13 +171,6 @@ Vector Gaussian::unwhiten(const Vector& v) const {
   return backSubstituteUpper(thisR(), v);
 }

-/* ************************************************************************* */
-double Gaussian::squaredMahalanobisDistance(const Vector& v) const {
-  // Note: for Diagonal, which does ediv_, will be correct for constraints
-  Vector w = whiten(v);
-  return w.dot(w);
-}
-
 /* ************************************************************************* */
 Matrix Gaussian::Whiten(const Matrix& H) const {
   return thisR() * H;

@ -376,8 +376,19 @@ Vector Constrained::whiten(const Vector& v) const {
   return c;
 }

+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
 /* ************************************************************************* */
-double Constrained::distance(const Vector& v) const {
+double Constrained::error(const Vector& v) const {
+  Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
+  for (size_t i=0; i<dim_; ++i) // add mu weights on constrained variables
+    if (constrained(i)) // whiten makes constrained variables zero
+      w[i] = v[i] * sqrt(mu_[i]); // TODO: may want to store sqrt rather than rebuild
+  return 0.5 * w.dot(w);
+}
+#endif
+
+/* ************************************************************************* */
+double Constrained::squaredMahalanobisDistance(const Vector& v) const {
   Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
   for (size_t i=0; i<dim_; ++i) // add mu weights on constrained variables
     if (constrained(i)) // whiten makes constrained variables zero

@ -663,7 +674,7 @@ void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{
 }

 Robust::shared_ptr Robust::Create(
 const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){
   return shared_ptr(new Robust(robust,noise));
 }
@ -90,7 +90,27 @@ namespace gtsam {
     /// Unwhiten an error vector.
     virtual Vector unwhiten(const Vector& v) const = 0;

-    virtual double distance(const Vector& v) const = 0;
+    /// Squared Mahalanobis distance v'*R'*R*v = <R*v,R*v>
+    virtual double squaredMahalanobisDistance(const Vector& v) const;
+
+    /// Mahalanobis distance
+    virtual double mahalanobisDistance(const Vector& v) const {
+      return std::sqrt(squaredMahalanobisDistance(v));
+    }
+
+    /// loss function, input is Mahalanobis distance
+    virtual double loss(const double squared_distance) const {
+      return 0.5 * squared_distance;
+    }
+
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
+    /// calculate the error value given measurement error vector
+    virtual double error(const Vector& v) const = 0;
+
+    virtual double distance(const Vector& v) {
+      return error(v) * 2;
+    }
+#endif

     virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const = 0;
     virtual void WhitenSystem(Matrix& A, Vector& b) const = 0;
|
||||||
*/
|
*/
|
||||||
static shared_ptr Covariance(const Matrix& covariance, bool smart = true);
|
static shared_ptr Covariance(const Matrix& covariance, bool smart = true);
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
virtual bool equals(const Base& expected, double tol=1e-9) const;
|
bool equals(const Base& expected, double tol=1e-9) const override;
|
||||||
virtual Vector sigmas() const;
|
Vector sigmas() const override;
|
||||||
virtual Vector whiten(const Vector& v) const;
|
Vector whiten(const Vector& v) const override;
|
||||||
virtual Vector unwhiten(const Vector& v) const;
|
Vector unwhiten(const Vector& v) const override;
|
||||||
|
|
||||||
/**
|
|
||||||
* Squared Mahalanobis distance v'*R'*R*v = <R*v,R*v>
|
|
||||||
*/
|
|
||||||
virtual double squaredMahalanobisDistance(const Vector& v) const;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Mahalanobis distance
|
|
||||||
*/
|
|
||||||
virtual double mahalanobisDistance(const Vector& v) const {
|
|
||||||
return std::sqrt(squaredMahalanobisDistance(v));
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||||
virtual double Mahalanobis(const Vector& v) const {
|
virtual double Mahalanobis(const Vector& v) const {
|
||||||
return squaredMahalanobisDistance(v);
|
return squaredMahalanobisDistance(v);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
inline virtual double distance(const Vector& v) const {
|
/**
|
||||||
return squaredMahalanobisDistance(v);
|
* error value 0.5 * v'*R'*R*v
|
||||||
|
*/
|
||||||
|
inline double error(const Vector& v) const override {
|
||||||
|
return 0.5 * squaredMahalanobisDistance(v);
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Multiply a derivative with R (derivative of whiten)
|
* Multiply a derivative with R (derivative of whiten)
|
||||||
* Equivalent to whitening each column of the input matrix.
|
* Equivalent to whitening each column of the input matrix.
|
||||||
*/
|
*/
|
||||||
virtual Matrix Whiten(const Matrix& H) const;
|
Matrix Whiten(const Matrix& H) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* In-place version
|
* In-place version
|
||||||
|
@ -247,10 +258,10 @@ namespace gtsam {
|
||||||
/**
|
/**
|
||||||
* Whiten a system, in place as well
|
* Whiten a system, in place as well
|
||||||
*/
|
*/
|
||||||
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
|
void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A, Vector& b) const;
|
void WhitenSystem(Matrix& A, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
|
void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
|
void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Apply appropriately weighted QR factorization to the system [A b]
|
* Apply appropriately weighted QR factorization to the system [A b]
|
||||||
|
@ -335,13 +346,13 @@ namespace gtsam {
|
||||||
return Variances(precisions.array().inverse(), smart);
|
return Variances(precisions.array().inverse(), smart);
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
virtual Vector sigmas() const { return sigmas_; }
|
Vector sigmas() const override { return sigmas_; }
|
||||||
virtual Vector whiten(const Vector& v) const;
|
Vector whiten(const Vector& v) const override;
|
||||||
virtual Vector unwhiten(const Vector& v) const;
|
Vector unwhiten(const Vector& v) const override;
|
||||||
virtual Matrix Whiten(const Matrix& H) const;
|
Matrix Whiten(const Matrix& H) const override;
|
||||||
virtual void WhitenInPlace(Matrix& H) const;
|
void WhitenInPlace(Matrix& H) const override;
|
||||||
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
|
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return standard deviations (sqrt of diagonal)
|
* Return standard deviations (sqrt of diagonal)
|
||||||
|
@ -363,7 +374,7 @@ namespace gtsam {
|
||||||
/**
|
/**
|
||||||
* Return R itself, but note that Whiten(H) is cheaper than R*H
|
* Return R itself, but note that Whiten(H) is cheaper than R*H
|
||||||
*/
|
*/
|
||||||
virtual Matrix R() const {
|
Matrix R() const override {
|
||||||
return invsigmas().asDiagonal();
|
return invsigmas().asDiagonal();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -417,10 +428,10 @@ namespace gtsam {
|
||||||
|
|
||||||
typedef boost::shared_ptr<Constrained> shared_ptr;
|
typedef boost::shared_ptr<Constrained> shared_ptr;
|
||||||
|
|
||||||
virtual ~Constrained() {}
|
~Constrained() {}
|
||||||
|
|
||||||
/// true if a constrained noise mode, saves slow/clumsy dynamic casting
|
/// true if a constrained noise mode, saves slow/clumsy dynamic casting
|
||||||
virtual bool isConstrained() const { return true; }
|
bool isConstrained() const override { return true; }
|
||||||
|
|
||||||
/// Return true if a particular dimension is free or constrained
|
/// Return true if a particular dimension is free or constrained
|
||||||
bool constrained(size_t i) const;
|
bool constrained(size_t i) const;
|
||||||
|
@ -472,12 +483,16 @@ namespace gtsam {
|
||||||
return MixedVariances(precisions.array().inverse());
|
return MixedVariances(precisions.array().inverse());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||||
/**
|
/**
|
||||||
* The distance function for a constrained noisemodel,
|
* The error function for a constrained noisemodel,
|
||||||
* for non-constrained versions, uses sigmas, otherwise
|
* for non-constrained versions, uses sigmas, otherwise
|
||||||
* uses the penalty function with mu
|
* uses the penalty function with mu
|
||||||
*/
|
*/
|
||||||
virtual double distance(const Vector& v) const;
|
double error(const Vector& v) const override;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
double squaredMahalanobisDistance(const Vector& v) const override;
|
||||||
|
|
||||||
/** Fully constrained variations */
|
/** Fully constrained variations */
|
||||||
static shared_ptr All(size_t dim) {
|
static shared_ptr All(size_t dim) {
|
||||||
|
@ -494,16 +509,16 @@ namespace gtsam {
|
||||||
return shared_ptr(new Constrained(Vector::Constant(dim, mu), Vector::Constant(dim,0)));
|
return shared_ptr(new Constrained(Vector::Constant(dim, mu), Vector::Constant(dim,0)));
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
|
|
||||||
/// Calculates error vector with weights applied
|
/// Calculates error vector with weights applied
|
||||||
virtual Vector whiten(const Vector& v) const;
|
Vector whiten(const Vector& v) const override;
|
||||||
|
|
||||||
/// Whitening functions will perform partial whitening on rows
|
/// Whitening functions will perform partial whitening on rows
|
||||||
/// with a non-zero sigma. Other rows remain untouched.
|
/// with a non-zero sigma. Other rows remain untouched.
|
||||||
virtual Matrix Whiten(const Matrix& H) const;
|
Matrix Whiten(const Matrix& H) const override;
|
||||||
virtual void WhitenInPlace(Matrix& H) const;
|
void WhitenInPlace(Matrix& H) const override;
|
||||||
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
|
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Apply QR factorization to the system [A b], taking into account constraints
|
* Apply QR factorization to the system [A b], taking into account constraints
|
||||||
|
@ -514,7 +529,7 @@ namespace gtsam {
|
||||||
* @param Ab is the m*(n+1) augmented system matrix [A b]
|
* @param Ab is the m*(n+1) augmented system matrix [A b]
|
||||||
* @return diagonal noise model can be all zeros, mixed, or not-constrained
|
* @return diagonal noise model can be all zeros, mixed, or not-constrained
|
||||||
*/
|
*/
|
||||||
virtual Diagonal::shared_ptr QR(Matrix& Ab) const;
|
Diagonal::shared_ptr QR(Matrix& Ab) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a Unit version of a constrained noisemodel in which
|
* Returns a Unit version of a constrained noisemodel in which
|
||||||
|
@ -576,14 +591,14 @@ namespace gtsam {
|
||||||
return Variance(dim, 1.0/precision, smart);
|
return Variance(dim, 1.0/precision, smart);
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
virtual double squaredMahalanobisDistance(const Vector& v) const;
|
double squaredMahalanobisDistance(const Vector& v) const override;
|
||||||
virtual Vector whiten(const Vector& v) const;
|
Vector whiten(const Vector& v) const override;
|
||||||
virtual Vector unwhiten(const Vector& v) const;
|
Vector unwhiten(const Vector& v) const override;
|
||||||
virtual Matrix Whiten(const Matrix& H) const;
|
Matrix Whiten(const Matrix& H) const override;
|
||||||
virtual void WhitenInPlace(Matrix& H) const;
|
void WhitenInPlace(Matrix& H) const override;
|
||||||
virtual void whitenInPlace(Vector& v) const;
|
void whitenInPlace(Vector& v) const override;
|
||||||
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
|
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return standard deviation
|
* Return standard deviation
|
||||||
|
@ -616,7 +631,7 @@ namespace gtsam {
|
||||||
|
|
||||||
typedef boost::shared_ptr<Unit> shared_ptr;
|
typedef boost::shared_ptr<Unit> shared_ptr;
|
||||||
|
|
||||||
virtual ~Unit() {}
|
~Unit() {}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a unit covariance noise model
|
* Create a unit covariance noise model
|
||||||
|
@ -626,19 +641,19 @@ namespace gtsam {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// true if a unit noise model, saves slow/clumsy dynamic casting
|
/// true if a unit noise model, saves slow/clumsy dynamic casting
|
||||||
virtual bool isUnit() const { return true; }
|
bool isUnit() const override { return true; }
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
virtual double squaredMahalanobisDistance(const Vector& v) const {return v.dot(v); }
|
double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); }
|
||||||
virtual Vector whiten(const Vector& v) const { return v; }
|
Vector whiten(const Vector& v) const override { return v; }
|
||||||
virtual Vector unwhiten(const Vector& v) const { return v; }
|
Vector unwhiten(const Vector& v) const override { return v; }
|
||||||
virtual Matrix Whiten(const Matrix& H) const { return H; }
|
Matrix Whiten(const Matrix& H) const override { return H; }
|
||||||
virtual void WhitenInPlace(Matrix& /*H*/) const {}
|
void WhitenInPlace(Matrix& /*H*/) const override {}
|
||||||
virtual void WhitenInPlace(Eigen::Block<Matrix> /*H*/) const {}
|
void WhitenInPlace(Eigen::Block<Matrix> /*H*/) const override {}
|
||||||
virtual void whitenInPlace(Vector& /*v*/) const {}
|
void whitenInPlace(Vector& /*v*/) const override {}
|
||||||
virtual void unwhitenInPlace(Vector& /*v*/) const {}
|
void unwhitenInPlace(Vector& /*v*/) const override {}
|
||||||
virtual void whitenInPlace(Eigen::Block<Vector>& /*v*/) const {}
|
void whitenInPlace(Eigen::Block<Vector>& /*v*/) const override {}
|
||||||
virtual void unwhitenInPlace(Eigen::Block<Vector>& /*v*/) const {}
|
void unwhitenInPlace(Eigen::Block<Vector>& /*v*/) const override {}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/** Serialization function */
|
/** Serialization function */
|
||||||
|
@ -687,10 +702,10 @@ namespace gtsam {
|
||||||
: Base(noise->dim()), robust_(robust), noise_(noise) {}
|
: Base(noise->dim()), robust_(robust), noise_(noise) {}
|
||||||
|
|
||||||
/// Destructor
|
/// Destructor
|
||||||
virtual ~Robust() {}
|
~Robust() {}
|
||||||
|
|
||||||
virtual void print(const std::string& name) const;
|
void print(const std::string& name) const override;
|
||||||
virtual bool equals(const Base& expected, double tol=1e-9) const;
|
bool equals(const Base& expected, double tol=1e-9) const override;
|
||||||
|
|
||||||
/// Return the contained robust error function
|
/// Return the contained robust error function
|
||||||
const RobustModel::shared_ptr& robust() const { return robust_; }
|
const RobustModel::shared_ptr& robust() const { return robust_; }
|
||||||
|
@ -699,28 +714,36 @@ namespace gtsam {
|
||||||
const NoiseModel::shared_ptr& noise() const { return noise_; }
|
const NoiseModel::shared_ptr& noise() const { return noise_; }
|
||||||
|
|
||||||
// TODO: functions below are dummy but necessary for the noiseModel::Base
|
// TODO: functions below are dummy but necessary for the noiseModel::Base
|
||||||
inline virtual Vector whiten(const Vector& v) const
|
inline Vector whiten(const Vector& v) const override
|
||||||
{ Vector r = v; this->WhitenSystem(r); return r; }
|
{ Vector r = v; this->WhitenSystem(r); return r; }
|
||||||
inline virtual Matrix Whiten(const Matrix& A) const
|
inline Matrix Whiten(const Matrix& A) const override
|
||||||
{ Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
|
{ Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
|
||||||
inline virtual Vector unwhiten(const Vector& /*v*/) const
|
inline Vector unwhiten(const Vector& /*v*/) const override
|
||||||
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
|
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
|
||||||
// Fold the use of the m-estimator residual(...) function into distance(...)
|
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||||
inline virtual double distance(const Vector& v) const
|
inline double distance(const Vector& v) const override {
|
||||||
{ return robust_->residual(this->unweightedWhiten(v).norm()); }
|
return robust_->loss(this->unweightedWhiten(v).norm());
|
||||||
inline virtual double distance_non_whitened(const Vector& v) const
|
}
|
||||||
{ return robust_->residual(v.norm()); }
|
// Fold the use of the m-estimator loss(...) function into error(...)
|
||||||
|
inline double error(const Vector& v) const override
|
||||||
|
{ return robust_->loss(noise_->mahalanobisDistance(v)); }
|
||||||
|
#endif
|
||||||
|
|
||||||
|
double loss(const double squared_distance) const override {
|
||||||
|
return robust_->loss(std::sqrt(squared_distance));
|
||||||
|
}
|
||||||
|
|
||||||
// TODO: these are really robust iterated re-weighting support functions
|
// TODO: these are really robust iterated re-weighting support functions
|
||||||
virtual void WhitenSystem(Vector& b) const;
|
virtual void WhitenSystem(Vector& b) const;
|
||||||
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
|
void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A, Vector& b) const;
|
void WhitenSystem(Matrix& A, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
|
void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override;
|
||||||
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
|
void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override;
|
||||||
|
|
||||||
virtual Vector unweightedWhiten(const Vector& v) const {
|
Vector unweightedWhiten(const Vector& v) const override {
|
||||||
return noise_->unweightedWhiten(v);
|
return noise_->unweightedWhiten(v);
|
||||||
}
|
}
|
||||||
virtual double weight(const Vector& v) const {
|
double weight(const Vector& v) const override {
|
||||||
// Todo(mikebosse): make the robust weight function input a vector.
|
// Todo(mikebosse): make the robust weight function input a vector.
|
||||||
return robust_->weight(v.norm());
|
return robust_->weight(v.norm());
|
||||||
}
|
}
|
||||||
|
|
|
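A minimal usage sketch of the Robust wrapper shown in this hunk, assuming the standard GTSAM noise-model API; the sigma 0.1 and Huber constant 1.345 are example values only:

#include <gtsam/linear/NoiseModel.h>

using gtsam::noiseModel::Isotropic;
using gtsam::noiseModel::Robust;
using gtsam::noiseModel::mEstimator::Huber;

// Wrap a 3-dimensional isotropic Gaussian in a Huber m-estimator.
// weight() then drives iteratively re-weighted least squares, while the new
// loss() override applies the robust penalty to the whitened residual norm.
gtsam::SharedNoiseModel makeRobustModel() {
  auto gaussian = Isotropic::Sigma(3, 0.1);
  auto huber = Huber::Create(1.345);
  return Robust::Create(huber, gaussian);
}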
@ -45,6 +45,17 @@ PCGSolver::PCGSolver(const PCGSolverParameters &p) {
|
||||||
preconditioner_ = createPreconditioner(p.preconditioner_);
|
preconditioner_ = createPreconditioner(p.preconditioner_);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void PCGSolverParameters::setPreconditionerParams(const boost::shared_ptr<PreconditionerParameters> preconditioner) {
|
||||||
|
preconditioner_ = preconditioner;
|
||||||
|
}
|
||||||
|
|
||||||
|
void PCGSolverParameters::print(const std::string &s) const {
|
||||||
|
std::cout << s << std::endl;
|
||||||
|
std::ostringstream os;
|
||||||
|
print(os);
|
||||||
|
std::cout << os.str() << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
/*****************************************************************************/
|
/*****************************************************************************/
|
||||||
VectorValues PCGSolver::optimize(const GaussianFactorGraph &gfg,
|
VectorValues PCGSolver::optimize(const GaussianFactorGraph &gfg,
|
||||||
const KeyInfo &keyInfo, const std::map<Key, Vector> &lambda,
|
const KeyInfo &keyInfo, const std::map<Key, Vector> &lambda,
|
||||||
|
|
|
@ -48,7 +48,12 @@ public:
|
||||||
return *preconditioner_;
|
return *preconditioner_;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// needed for python wrapper
|
||||||
|
void print(const std::string &s) const;
|
||||||
|
|
||||||
boost::shared_ptr<PreconditionerParameters> preconditioner_;
|
boost::shared_ptr<PreconditionerParameters> preconditioner_;
|
||||||
|
|
||||||
|
void setPreconditionerParams(const boost::shared_ptr<PreconditionerParameters> preconditioner);
|
||||||
};
|
};
|
||||||
|
|
||||||
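A minimal sketch of how the new setter and print() are meant to be used, for instance from the Python/Cython wrapper where direct member access is awkward; DummyPreconditionerParameters is assumed here as one of the existing preconditioner parameter types:

#include <boost/make_shared.hpp>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>

int main() {
  gtsam::PCGSolverParameters pcg;
  // Swap in a concrete preconditioner parameter block via the new setter.
  pcg.setPreconditionerParams(
      boost::make_shared<gtsam::DummyPreconditionerParameters>());
  pcg.print("PCG parameters");  // the print() added above for the python wrapper
  return 0;
}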
/**
|
/**
|
||||||
|
|
|
@ -182,8 +182,9 @@ TEST(NoiseModel, ConstrainedMixed )
|
||||||
EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible)));
|
EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible)));
|
||||||
EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible)));
|
EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible)));
|
||||||
|
|
||||||
DOUBLES_EQUAL(1000.0 + 0.25 + 0.25,d->distance(infeasible),1e-9);
|
DOUBLES_EQUAL(0.5 * (1000.0 + 0.25 + 0.25),d->loss(d->squaredMahalanobisDistance(infeasible)),1e-9);
|
||||||
DOUBLES_EQUAL(0.5,d->distance(feasible),1e-9);
|
DOUBLES_EQUAL(0.5, d->squaredMahalanobisDistance(feasible),1e-9);
|
||||||
|
DOUBLES_EQUAL(0.5 * 0.5, d->loss(0.5),1e-9);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -197,8 +198,9 @@ TEST(NoiseModel, ConstrainedAll )
|
||||||
EXPECT(assert_equal(Vector3(1.0, 1.0, 1.0),i->whiten(infeasible)));
|
EXPECT(assert_equal(Vector3(1.0, 1.0, 1.0),i->whiten(infeasible)));
|
||||||
EXPECT(assert_equal(Vector3(0.0, 0.0, 0.0),i->whiten(feasible)));
|
EXPECT(assert_equal(Vector3(0.0, 0.0, 0.0),i->whiten(feasible)));
|
||||||
|
|
||||||
DOUBLES_EQUAL(1000.0 * 3.0,i->distance(infeasible),1e-9);
|
DOUBLES_EQUAL(0.5 * 1000.0 * 3.0,i->loss(i->squaredMahalanobisDistance(infeasible)),1e-9);
|
||||||
DOUBLES_EQUAL(0.0,i->distance(feasible),1e-9);
|
DOUBLES_EQUAL(0.0, i->squaredMahalanobisDistance(feasible), 1e-9);
|
||||||
|
DOUBLES_EQUAL(0.0, i->loss(0.0),1e-9);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -451,7 +453,7 @@ TEST(NoiseModel, WhitenInPlace)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* These tests are responsible for testing the weight functions for the m-estimators in GTSAM.
|
* These tests are responsible for testing the weight functions for the m-estimators in GTSAM.
|
||||||
* The weight function is related to the analytic derivative of the residual function. See
|
* The weight function is related to the analytic derivative of the loss function. See
|
||||||
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
|
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
|
||||||
* for details. This weight function is required when optimizing cost functions with robust
|
* for details. This weight function is required when optimizing cost functions with robust
|
||||||
* penalties using iteratively re-weighted least squares.
|
* penalties using iteratively re-weighted least squares.
|
||||||
|
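To make the loss/weight relationship described in the comment above concrete, here is a small self-contained sketch using the standard Huber formulas with an assumed tuning constant k = 5; the values agree with those asserted in robustFunctionHuber below:

#include <cassert>
#include <cmath>

// Standard Huber loss rho(x) and its IRLS weight w(x) = rho'(x) / x.
double huberLoss(double x, double k) {
  const double ax = std::fabs(x);
  return ax <= k ? 0.5 * x * x : k * (ax - 0.5 * k);
}

double huberWeight(double x, double k) {
  const double ax = std::fabs(x);
  return ax <= k ? 1.0 : k / ax;
}

int main() {
  const double k = 5.0;
  // Inside the quadratic region the weight is 1; outside it decays as k/|x|.
  assert(std::fabs(huberWeight(1.0, k) - 1.0) < 1e-9);
  assert(std::fabs(huberLoss(1.0, k) - 0.5) < 1e-9);
  assert(std::fabs(huberWeight(10.0, k) - 0.5) < 1e-9);
  assert(std::fabs(huberLoss(10.0, k) - 37.5) < 1e-9);
  return 0;
}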
@ -467,10 +469,10 @@ TEST(NoiseModel, robustFunctionFair)
|
||||||
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
|
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
|
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.441961080151135, fair->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
|
DOUBLES_EQUAL(22.534692783297260, fair->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
|
DOUBLES_EQUAL(22.534692783297260, fair->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.441961080151135, fair->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionHuber)
|
TEST(NoiseModel, robustFunctionHuber)
|
||||||
|
@ -483,10 +485,10 @@ TEST(NoiseModel, robustFunctionHuber)
|
||||||
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
|
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
|
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.5000, huber->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
|
DOUBLES_EQUAL(37.5000, huber->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
|
DOUBLES_EQUAL(37.5000, huber->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionCauchy)
|
TEST(NoiseModel, robustFunctionCauchy)
|
||||||
|
@ -499,10 +501,10 @@ TEST(NoiseModel, robustFunctionCauchy)
|
||||||
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
|
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
|
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
|
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
|
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionGemanMcClure)
|
TEST(NoiseModel, robustFunctionGemanMcClure)
|
||||||
|
@ -514,10 +516,10 @@ TEST(NoiseModel, robustFunctionGemanMcClure)
|
||||||
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
|
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
|
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.2500, gmc->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
|
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
|
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionWelsch)
|
TEST(NoiseModel, robustFunctionWelsch)
|
||||||
|
@ -530,10 +532,10 @@ TEST(NoiseModel, robustFunctionWelsch)
|
||||||
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
|
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
|
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
|
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
|
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionTukey)
|
TEST(NoiseModel, robustFunctionTukey)
|
||||||
|
@ -546,10 +548,10 @@ TEST(NoiseModel, robustFunctionTukey)
|
||||||
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
|
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
|
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
|
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
|
||||||
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
|
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
|
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionDCS)
|
TEST(NoiseModel, robustFunctionDCS)
|
||||||
|
@ -560,8 +562,8 @@ TEST(NoiseModel, robustFunctionDCS)
|
||||||
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
|
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
|
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
|
DOUBLES_EQUAL(0.5 , dcs->loss(error1), 1e-8);
|
||||||
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
|
DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(NoiseModel, robustFunctionL2WithDeadZone)
|
TEST(NoiseModel, robustFunctionL2WithDeadZone)
|
||||||
|
@ -576,12 +578,12 @@ TEST(NoiseModel, robustFunctionL2WithDeadZone)
|
||||||
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
|
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
|
||||||
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);
|
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);
|
||||||
|
|
||||||
DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8);
|
DOUBLES_EQUAL(40.5, lsdz->loss(e0), 1e-8);
|
||||||
DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8);
|
DOUBLES_EQUAL(0.00005, lsdz->loss(e1), 1e-8);
|
||||||
DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8);
|
DOUBLES_EQUAL(0.0, lsdz->loss(e2), 1e-8);
|
||||||
DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8);
|
DOUBLES_EQUAL(0.0, lsdz->loss(e3), 1e-8);
|
||||||
DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8);
|
DOUBLES_EQUAL(0.00005, lsdz->loss(e4), 1e-8);
|
||||||
DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8);
|
DOUBLES_EQUAL(40.5, lsdz->loss(e5), 1e-8);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -665,11 +667,11 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
|
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
|
||||||
* implement a residual function, and GTSAM calls the weight function to evaluate the
|
* implement a loss function, and GTSAM calls the weight function to evaluate the
|
||||||
* total penalty, rather than calling the residual function. The weight function should be
|
* total penalty, rather than calling the loss function. The weight function should be
|
||||||
* used during iteratively reweighted least squares optimization, but should not be used to
|
* used during iteratively reweighted least squares optimization, but should not be used to
|
||||||
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
|
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
|
||||||
* both a weight and a residual function, and for GTSAM to call the residual function when
|
* both a weight and a loss function, and for GTSAM to call the loss function when
|
||||||
* evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
|
* evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
|
||||||
* commented out until the underlying bug in GTSAM is fixed.
|
* commented out until the underlying bug in GTSAM is fixed.
|
||||||
*
|
*
|
||||||
|
@ -681,13 +683,44 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(NoiseModel, lossFunctionAtZero)
|
||||||
|
{
|
||||||
|
const double k = 5.0;
|
||||||
|
auto fair = mEstimator::Fair::Create(k);
|
||||||
|
DOUBLES_EQUAL(fair->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(fair->weight(0), 1, 1e-8);
|
||||||
|
auto huber = mEstimator::Huber::Create(k);
|
||||||
|
DOUBLES_EQUAL(huber->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(huber->weight(0), 1, 1e-8);
|
||||||
|
auto cauchy = mEstimator::Cauchy::Create(k);
|
||||||
|
DOUBLES_EQUAL(cauchy->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(cauchy->weight(0), 1, 1e-8);
|
||||||
|
auto gmc = mEstimator::GemanMcClure::Create(k);
|
||||||
|
DOUBLES_EQUAL(gmc->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(gmc->weight(0), 1, 1e-8);
|
||||||
|
auto welsch = mEstimator::Welsch::Create(k);
|
||||||
|
DOUBLES_EQUAL(welsch->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(welsch->weight(0), 1, 1e-8);
|
||||||
|
auto tukey = mEstimator::Tukey::Create(k);
|
||||||
|
DOUBLES_EQUAL(tukey->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(tukey->weight(0), 1, 1e-8);
|
||||||
|
auto dcs = mEstimator::DCS::Create(k);
|
||||||
|
DOUBLES_EQUAL(dcs->loss(0), 0, 1e-8);
|
||||||
|
DOUBLES_EQUAL(dcs->weight(0), 1, 1e-8);
|
||||||
|
// auto lsdz = mEstimator::L2WithDeadZone::Create(k);
|
||||||
|
// DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
|
||||||
|
// DOUBLES_EQUAL(lsdz->weight(0), 1, 1e-8);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
#define TEST_GAUSSIAN(gaussian)\
|
#define TEST_GAUSSIAN(gaussian)\
|
||||||
EQUALITY(info, gaussian->information());\
|
EQUALITY(info, gaussian->information());\
|
||||||
EQUALITY(cov, gaussian->covariance());\
|
EQUALITY(cov, gaussian->covariance());\
|
||||||
EXPECT(assert_equal(white, gaussian->whiten(e)));\
|
EXPECT(assert_equal(white, gaussian->whiten(e)));\
|
||||||
EXPECT(assert_equal(e, gaussian->unwhiten(white)));\
|
EXPECT(assert_equal(e, gaussian->unwhiten(white)));\
|
||||||
EXPECT_DOUBLES_EQUAL(251, gaussian->distance(e), 1e-9);\
|
EXPECT_DOUBLES_EQUAL(251.0, gaussian->squaredMahalanobisDistance(e), 1e-9);\
|
||||||
|
EXPECT_DOUBLES_EQUAL(0.5 * 251.0, gaussian->loss(251.0), 1e-9);\
|
||||||
Matrix A = R.inverse(); Vector b = e;\
|
Matrix A = R.inverse(); Vector b = e;\
|
||||||
gaussian->WhitenSystem(A, b);\
|
gaussian->WhitenSystem(A, b);\
|
||||||
EXPECT(assert_equal(I, A));\
|
EXPECT(assert_equal(I, A));\
|
||||||
|
|
|
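For reference on the two changed assertions above: for a plain Gaussian model the loss is simply half the squared Mahalanobis distance, with $R$ the whitening matrix and $\Sigma$ the covariance,

$$ d^2(v) = v^\top \Sigma^{-1} v = \lVert R\,v \rVert^2, \qquad \mathrm{loss}(d^2) = \tfrac{1}{2}\, d^2, $$

so loss(251.0) = 125.5 in the macro.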
@ -17,9 +17,11 @@
|
||||||
* @author Vadim Indelman
|
* @author Vadim Indelman
|
||||||
* @author David Jensen
|
* @author David Jensen
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
|
* @author Varun Agrawal
|
||||||
**/
|
**/
|
||||||
|
|
||||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
|
#include <boost/serialization/export.hpp>
|
||||||
|
|
||||||
/* External or standard includes */
|
/* External or standard includes */
|
||||||
#include <ostream>
|
#include <ostream>
|
||||||
|
@ -28,6 +30,31 @@ namespace gtsam {
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
//------------------------------------------------------------------------------
|
||||||
|
// Inner class PreintegrationCombinedParams
|
||||||
|
//------------------------------------------------------------------------------
|
||||||
|
void PreintegrationCombinedParams::print(const string& s) const {
|
||||||
|
PreintegrationParams::print(s);
|
||||||
|
cout << "biasAccCovariance:\n[\n" << biasAccCovariance << "\n]"
|
||||||
|
<< endl;
|
||||||
|
cout << "biasOmegaCovariance:\n[\n" << biasOmegaCovariance << "\n]"
|
||||||
|
<< endl;
|
||||||
|
cout << "biasAccOmegaInt:\n[\n" << biasAccOmegaInt << "\n]"
|
||||||
|
<< endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
//------------------------------------------------------------------------------
|
||||||
|
bool PreintegrationCombinedParams::equals(const PreintegratedRotationParams& other,
|
||||||
|
double tol) const {
|
||||||
|
auto e = dynamic_cast<const PreintegrationCombinedParams*>(&other);
|
||||||
|
return e != nullptr && PreintegrationParams::equals(other, tol) &&
|
||||||
|
equal_with_abs_tol(biasAccCovariance, e->biasAccCovariance,
|
||||||
|
tol) &&
|
||||||
|
equal_with_abs_tol(biasOmegaCovariance, e->biasOmegaCovariance,
|
||||||
|
tol) &&
|
||||||
|
equal_with_abs_tol(biasAccOmegaInt, e->biasAccOmegaInt, tol);
|
||||||
|
}
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
// Inner class PreintegratedCombinedMeasurements
|
// Inner class PreintegratedCombinedMeasurements
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
|
@ -242,6 +269,13 @@ Vector CombinedImuFactor::evaluateError(const Pose3& pose_i,
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//------------------------------------------------------------------------------
|
||||||
|
std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) {
|
||||||
|
f._PIM_.print("combined preintegrated measurements:\n");
|
||||||
|
os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose();
|
||||||
|
return os;
|
||||||
|
}
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||||
CombinedImuFactor::CombinedImuFactor(
|
CombinedImuFactor::CombinedImuFactor(
|
||||||
|
@ -279,3 +313,6 @@ void CombinedImuFactor::Predict(const Pose3& pose_i, const Vector3& vel_i,
|
||||||
}
|
}
|
||||||
/// namespace gtsam
|
/// namespace gtsam
|
||||||
|
|
||||||
|
/// Boost serialization export definition for derived class
|
||||||
|
BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams);
|
||||||
|
|
||||||
|
|
|
@ -17,6 +17,7 @@
|
||||||
* @author Vadim Indelman
|
* @author Vadim Indelman
|
||||||
* @author David Jensen
|
* @author David Jensen
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
|
* @author Varun Agrawal
|
||||||
**/
|
**/
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
@ -26,6 +27,7 @@
|
||||||
#include <gtsam/navigation/TangentPreintegration.h>
|
#include <gtsam/navigation/TangentPreintegration.h>
|
||||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||||
#include <gtsam/base/Matrix.h>
|
#include <gtsam/base/Matrix.h>
|
||||||
|
#include <gtsam/base/serialization.h>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
|
@ -61,10 +63,18 @@ struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams {
|
||||||
Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk
|
Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk
|
||||||
Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration
|
Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration
|
||||||
|
|
||||||
|
/// Default constructor makes uninitialized params struct.
|
||||||
|
/// Used for serialization.
|
||||||
|
PreintegrationCombinedParams()
|
||||||
|
: biasAccCovariance(I_3x3),
|
||||||
|
biasOmegaCovariance(I_3x3),
|
||||||
|
biasAccOmegaInt(I_6x6) {}
|
||||||
|
|
||||||
/// See two named constructors below for good values of n_gravity in body frame
|
/// See two named constructors below for good values of n_gravity in body frame
|
||||||
PreintegrationCombinedParams(const Vector3& n_gravity) :
|
PreintegrationCombinedParams(const Vector3& n_gravity) :
|
||||||
PreintegrationParams(n_gravity), biasAccCovariance(I_3x3), biasOmegaCovariance(
|
PreintegrationParams(n_gravity), biasAccCovariance(I_3x3),
|
||||||
I_3x3), biasAccOmegaInt(I_6x6) {
|
biasOmegaCovariance(I_3x3), biasAccOmegaInt(I_6x6) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default Params for a Z-down navigation frame, such as NED: gravity points along positive Z-axis
|
// Default Params for a Z-down navigation frame, such as NED: gravity points along positive Z-axis
|
||||||
|
@ -77,6 +87,9 @@ PreintegrationCombinedParams(const Vector3& n_gravity) :
|
||||||
return boost::shared_ptr<PreintegrationCombinedParams>(new PreintegrationCombinedParams(Vector3(0, 0, -g)));
|
return boost::shared_ptr<PreintegrationCombinedParams>(new PreintegrationCombinedParams(Vector3(0, 0, -g)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void print(const std::string& s="") const;
|
||||||
|
bool equals(const PreintegratedRotationParams& other, double tol) const;
|
||||||
|
|
||||||
void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; }
|
void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; }
|
||||||
void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; }
|
void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; }
|
||||||
void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; }
|
void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; }
|
||||||
|
@ -86,24 +99,22 @@ PreintegrationCombinedParams(const Vector3& n_gravity) :
|
||||||
const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; }
|
const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/// Default constructor makes unitialized params struct
|
|
||||||
PreintegrationCombinedParams() {}
|
|
||||||
|
|
||||||
/** Serialization function */
|
/** Serialization function */
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
template <class ARCHIVE>
|
template <class ARCHIVE>
|
||||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||||
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotation::Params);
|
namespace bs = ::boost::serialization;
|
||||||
ar& BOOST_SERIALIZATION_NVP(biasAccCovariance);
|
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationParams);
|
||||||
ar& BOOST_SERIALIZATION_NVP(biasOmegaCovariance);
|
ar & BOOST_SERIALIZATION_NVP(biasAccCovariance);
|
||||||
ar& BOOST_SERIALIZATION_NVP(biasAccOmegaInt);
|
ar & BOOST_SERIALIZATION_NVP(biasOmegaCovariance);
|
||||||
|
ar & BOOST_SERIALIZATION_NVP(biasAccOmegaInt);
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* PreintegratedCombinedMeasurements integrates the IMU measurements
|
* PreintegratedCombinedMeasurements integrates the IMU measurements
|
||||||
* (rotation rates and accelerations) and the corresponding covariance matrix.
|
* (rotation rates and accelerations) and the corresponding covariance matrix.
|
||||||
|
@ -128,7 +139,6 @@ public:
|
||||||
*/
|
*/
|
||||||
Eigen::Matrix<double, 15, 15> preintMeasCov_;
|
Eigen::Matrix<double, 15, 15> preintMeasCov_;
|
||||||
|
|
||||||
|
|
||||||
friend class CombinedImuFactor;
|
friend class CombinedImuFactor;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -136,11 +146,14 @@ public:
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
/// Default constructor only for serialization and Cython wrapper
|
/// Default constructor only for serialization and Cython wrapper
|
||||||
PreintegratedCombinedMeasurements() {}
|
PreintegratedCombinedMeasurements() {
|
||||||
|
preintMeasCov_.setZero();
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Default constructor, initializes the class with no measurements
|
* Default constructor, initializes the class with no measurements
|
||||||
* @param bias Current estimate of acceleration and rotation rate biases
|
* @param p Parameters, typically fixed in a single application
|
||||||
|
* @param biasHat Current estimate of acceleration and rotation rate biases
|
||||||
*/
|
*/
|
||||||
PreintegratedCombinedMeasurements(
|
PreintegratedCombinedMeasurements(
|
||||||
const boost::shared_ptr<Params>& p,
|
const boost::shared_ptr<Params>& p,
|
||||||
|
@ -149,6 +162,19 @@ public:
|
||||||
preintMeasCov_.setZero();
|
preintMeasCov_.setZero();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Construct preintegrated directly from members: base class and preintMeasCov
|
||||||
|
* @param base PreintegrationType instance
|
||||||
|
* @param preintMeasCov Covariance matrix used in noise model.
|
||||||
|
*/
|
||||||
|
PreintegratedCombinedMeasurements(const PreintegrationType& base, const Eigen::Matrix<double, 15, 15>& preintMeasCov)
|
||||||
|
: PreintegrationType(base),
|
||||||
|
preintMeasCov_(preintMeasCov) {
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Virtual destructor
|
||||||
|
virtual ~PreintegratedCombinedMeasurements() {}
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
/// @name Basic utilities
|
/// @name Basic utilities
|
||||||
|
@ -158,20 +184,25 @@ public:
|
||||||
void resetIntegration() override;
|
void resetIntegration() override;
|
||||||
|
|
||||||
/// const reference to params, shadows definition in base class
|
/// const reference to params, shadows definition in base class
|
||||||
Params& p() const { return *boost::static_pointer_cast<Params>(this->p_);}
|
Params& p() const { return *boost::static_pointer_cast<Params>(this->p_); }
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
/// @name Access instance variables
|
/// @name Access instance variables
|
||||||
/// @{
|
/// @{
|
||||||
|
/// Return pre-integrated measurement covariance
|
||||||
Matrix preintMeasCov() const { return preintMeasCov_; }
|
Matrix preintMeasCov() const { return preintMeasCov_; }
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
/// @name Testable
|
/// @name Testable
|
||||||
/// @{
|
/// @{
|
||||||
|
/// print
|
||||||
void print(const std::string& s = "Preintegrated Measurements:") const override;
|
void print(const std::string& s = "Preintegrated Measurements:") const override;
|
||||||
bool equals(const PreintegratedCombinedMeasurements& expected, double tol = 1e-9) const;
|
/// equals
|
||||||
|
bool equals(const PreintegratedCombinedMeasurements& expected,
|
||||||
|
double tol = 1e-9) const;
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
|
|
||||||
/// @name Main functionality
|
/// @name Main functionality
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
|
@ -205,6 +236,7 @@ public:
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
template <class ARCHIVE>
|
template <class ARCHIVE>
|
||||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||||
|
namespace bs = ::boost::serialization;
|
||||||
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType);
|
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType);
|
||||||
ar& BOOST_SERIALIZATION_NVP(preintMeasCov_);
|
ar& BOOST_SERIALIZATION_NVP(preintMeasCov_);
|
||||||
}
|
}
|
||||||
|
@ -244,9 +276,6 @@ private:
|
||||||
|
|
||||||
PreintegratedCombinedMeasurements _PIM_;
|
PreintegratedCombinedMeasurements _PIM_;
|
||||||
|
|
||||||
/** Default constructor - only use for serialization */
|
|
||||||
CombinedImuFactor() {}
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
/** Shorthand for a smart pointer to a factor */
|
/** Shorthand for a smart pointer to a factor */
|
||||||
|
@ -256,6 +285,9 @@ public:
|
||||||
typedef boost::shared_ptr<CombinedImuFactor> shared_ptr;
|
typedef boost::shared_ptr<CombinedImuFactor> shared_ptr;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/** Default constructor - only use for serialization */
|
||||||
|
CombinedImuFactor() {}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor
|
* Constructor
|
||||||
* @param pose_i Previous pose key
|
* @param pose_i Previous pose key
|
||||||
|
@ -277,12 +309,17 @@ public:
|
||||||
|
|
||||||
/** implement functions needed for Testable */
|
/** implement functions needed for Testable */
|
||||||
|
|
||||||
|
/// @name Testable
|
||||||
|
/// @{
|
||||||
|
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||||
|
const CombinedImuFactor&);
|
||||||
/// print
|
/// print
|
||||||
virtual void print(const std::string& s, const KeyFormatter& keyFormatter =
|
virtual void print(const std::string& s, const KeyFormatter& keyFormatter =
|
||||||
DefaultKeyFormatter) const;
|
DefaultKeyFormatter) const;
|
||||||
|
|
||||||
/// equals
|
/// equals
|
||||||
virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const;
|
virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const;
|
||||||
|
/// @}
|
||||||
|
|
||||||
/** Access the preintegrated measurements. */
|
/** Access the preintegrated measurements. */
|
||||||
|
|
||||||
|
@ -321,14 +358,12 @@ public:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
/** Serialization function */
|
/** Serialization function */
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
template<class ARCHIVE>
|
template <class ARCHIVE>
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||||
ar & boost::serialization::make_nvp("NoiseModelFactor6",
|
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(NoiseModelFactor6);
|
||||||
boost::serialization::base_object<Base>(*this));
|
ar& BOOST_SERIALIZATION_NVP(_PIM_);
|
||||||
ar & BOOST_SERIALIZATION_NVP(_PIM_);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -336,4 +371,18 @@ public:
|
||||||
};
|
};
|
||||||
// class CombinedImuFactor
|
// class CombinedImuFactor
|
||||||
|
|
||||||
} /// namespace gtsam
|
template <>
|
||||||
|
struct traits<PreintegrationCombinedParams>
|
||||||
|
: public Testable<PreintegrationCombinedParams> {};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct traits<PreintegratedCombinedMeasurements>
|
||||||
|
: public Testable<PreintegratedCombinedMeasurements> {};
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct traits<CombinedImuFactor> : public Testable<CombinedImuFactor> {};
|
||||||
|
|
||||||
|
} // namespace gtsam
|
||||||
|
|
||||||
|
/// Add Boost serialization export key (declaration) for derived class
|
||||||
|
BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams);
|
||||||
|
|
|
@ -59,7 +59,7 @@ typedef ManifoldPreintegration PreintegrationType;
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* PreintegratedIMUMeasurements accumulates (integrates) the IMU measurements
|
* PreintegratedImuMeasurements accumulates (integrates) the IMU measurements
|
||||||
* (rotation rates and accelerations) and the corresponding covariance matrix.
|
* (rotation rates and accelerations) and the corresponding covariance matrix.
|
||||||
* The measurements are then used to build the Preintegrated IMU factor.
|
* The measurements are then used to build the Preintegrated IMU factor.
|
||||||
* Integration is done incrementally (ideally, one integrates the measurement
|
* Integration is done incrementally (ideally, one integrates the measurement
|
||||||
|
@ -87,8 +87,8 @@ public:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor, initializes the class with no measurements
|
* Constructor, initializes the class with no measurements
|
||||||
* @param bias Current estimate of acceleration and rotation rate biases
|
* @param p Parameters, typically fixed in a single application
|
||||||
* @param p Parameters, typically fixed in a single application
|
* @param biasHat Current estimate of acceleration and rotation rate biases
|
||||||
*/
|
*/
|
||||||
PreintegratedImuMeasurements(const boost::shared_ptr<PreintegrationParams>& p,
|
PreintegratedImuMeasurements(const boost::shared_ptr<PreintegrationParams>& p,
|
||||||
const imuBias::ConstantBias& biasHat = imuBias::ConstantBias()) :
|
const imuBias::ConstantBias& biasHat = imuBias::ConstantBias()) :
|
||||||
|
@ -164,7 +164,7 @@ private:
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
namespace bs = ::boost::serialization;
|
namespace bs = ::boost::serialization;
|
||||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType);
|
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType);
|
||||||
ar & bs::make_nvp("preintMeasCov_", bs::make_array(preintMeasCov_.data(), preintMeasCov_.size()));
|
ar & BOOST_SERIALIZATION_NVP(preintMeasCov_);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -118,15 +118,13 @@ private:
|
||||||
template<class ARCHIVE>
|
template<class ARCHIVE>
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
namespace bs = ::boost::serialization;
|
namespace bs = ::boost::serialization;
|
||||||
ar & BOOST_SERIALIZATION_NVP(p_);
|
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationBase);
|
||||||
ar & BOOST_SERIALIZATION_NVP(deltaTij_);
|
|
||||||
ar & BOOST_SERIALIZATION_NVP(deltaXij_);
|
ar & BOOST_SERIALIZATION_NVP(deltaXij_);
|
||||||
ar & BOOST_SERIALIZATION_NVP(biasHat_);
|
ar & BOOST_SERIALIZATION_NVP(delRdelBiasOmega_);
|
||||||
ar & bs::make_nvp("delRdelBiasOmega_", bs::make_array(delRdelBiasOmega_.data(), delRdelBiasOmega_.size()));
|
ar & BOOST_SERIALIZATION_NVP(delPdelBiasAcc_);
|
||||||
ar & bs::make_nvp("delPdelBiasAcc_", bs::make_array(delPdelBiasAcc_.data(), delPdelBiasAcc_.size()));
|
ar & BOOST_SERIALIZATION_NVP(delPdelBiasOmega_);
|
||||||
ar & bs::make_nvp("delPdelBiasOmega_", bs::make_array(delPdelBiasOmega_.data(), delPdelBiasOmega_.size()));
|
ar & BOOST_SERIALIZATION_NVP(delVdelBiasAcc_);
|
||||||
ar & bs::make_nvp("delVdelBiasAcc_", bs::make_array(delVdelBiasAcc_.data(), delVdelBiasAcc_.size()));
|
ar & BOOST_SERIALIZATION_NVP(delVdelBiasOmega_);
|
||||||
ar & bs::make_nvp("delVdelBiasOmega_", bs::make_array(delVdelBiasOmega_.data(), delVdelBiasOmega_.size()));
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -25,7 +25,7 @@ using namespace std;
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
void PreintegratedRotation::Params::print(const string& s) const {
|
void PreintegratedRotationParams::print(const string& s) const {
|
||||||
cout << s << endl;
|
cout << s << endl;
|
||||||
cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl;
|
cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl;
|
||||||
if (omegaCoriolis)
|
if (omegaCoriolis)
|
||||||
|
@ -34,8 +34,8 @@ void PreintegratedRotation::Params::print(const string& s) const {
|
||||||
body_P_sensor->print("body_P_sensor");
|
body_P_sensor->print("body_P_sensor");
|
||||||
}
|
}
|
||||||
|
|
||||||
bool PreintegratedRotation::Params::equals(
|
bool PreintegratedRotationParams::equals(
|
||||||
const PreintegratedRotation::Params& other, double tol) const {
|
const PreintegratedRotationParams& other, double tol) const {
|
||||||
if (body_P_sensor) {
|
if (body_P_sensor) {
|
||||||
if (!other.body_P_sensor
|
if (!other.body_P_sensor
|
||||||
|| !assert_equal(*body_P_sensor, *other.body_P_sensor, tol))
|
|| !assert_equal(*body_P_sensor, *other.body_P_sensor, tol))
|
||||||
|
|
|
@ -61,9 +61,15 @@ struct GTSAM_EXPORT PreintegratedRotationParams {
|
||||||
template<class ARCHIVE>
|
template<class ARCHIVE>
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
namespace bs = ::boost::serialization;
|
namespace bs = ::boost::serialization;
|
||||||
ar & bs::make_nvp("gyroscopeCovariance", bs::make_array(gyroscopeCovariance.data(), gyroscopeCovariance.size()));
|
ar & BOOST_SERIALIZATION_NVP(gyroscopeCovariance);
|
||||||
ar & BOOST_SERIALIZATION_NVP(omegaCoriolis);
|
|
||||||
ar & BOOST_SERIALIZATION_NVP(body_P_sensor);
|
ar & BOOST_SERIALIZATION_NVP(body_P_sensor);
|
||||||
|
|
||||||
|
// Provide support for Eigen::Matrix in boost::optional
|
||||||
|
bool omegaCoriolisFlag = omegaCoriolis.is_initialized();
|
||||||
|
ar & boost::serialization::make_nvp("omegaCoriolisFlag", omegaCoriolisFlag);
|
||||||
|
if (omegaCoriolisFlag) {
|
||||||
|
ar & BOOST_SERIALIZATION_NVP(*omegaCoriolis);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef GTSAM_USE_QUATERNIONS
|
#ifdef GTSAM_USE_QUATERNIONS
|
||||||
|
|
|
@ -213,6 +213,16 @@ class GTSAM_EXPORT PreintegrationBase {
|
||||||
/// @}
|
/// @}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
private:
|
||||||
|
/** Serialization function */
|
||||||
|
friend class boost::serialization::access;
|
||||||
|
template<class ARCHIVE>
|
||||||
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
|
ar & BOOST_SERIALIZATION_NVP(p_);
|
||||||
|
ar & BOOST_SERIALIZATION_NVP(biasHat_);
|
||||||
|
ar & BOOST_SERIALIZATION_NVP(deltaTij_);
|
||||||
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
|
@ -27,7 +27,7 @@ namespace gtsam {
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
void PreintegrationParams::print(const string& s) const {
|
void PreintegrationParams::print(const string& s) const {
|
||||||
PreintegratedRotation::Params::print(s);
|
PreintegratedRotationParams::print(s);
|
||||||
cout << "accelerometerCovariance:\n[\n" << accelerometerCovariance << "\n]"
|
cout << "accelerometerCovariance:\n[\n" << accelerometerCovariance << "\n]"
|
||||||
<< endl;
|
<< endl;
|
||||||
cout << "integrationCovariance:\n[\n" << integrationCovariance << "\n]"
|
cout << "integrationCovariance:\n[\n" << integrationCovariance << "\n]"
|
||||||
|
@ -39,10 +39,10 @@ void PreintegrationParams::print(const string& s) const {
|
||||||
}
|
}
|
||||||
|
|
||||||
//------------------------------------------------------------------------------
|
//------------------------------------------------------------------------------
|
||||||
bool PreintegrationParams::equals(const PreintegratedRotation::Params& other,
|
bool PreintegrationParams::equals(const PreintegratedRotationParams& other,
|
||||||
double tol) const {
|
double tol) const {
|
||||||
auto e = dynamic_cast<const PreintegrationParams*>(&other);
|
auto e = dynamic_cast<const PreintegrationParams*>(&other);
|
||||||
return e != nullptr && PreintegratedRotation::Params::equals(other, tol) &&
|
return e != nullptr && PreintegratedRotationParams::equals(other, tol) &&
|
||||||
use2ndOrderCoriolis == e->use2ndOrderCoriolis &&
|
use2ndOrderCoriolis == e->use2ndOrderCoriolis &&
|
||||||
equal_with_abs_tol(accelerometerCovariance, e->accelerometerCovariance,
|
equal_with_abs_tol(accelerometerCovariance, e->accelerometerCovariance,
|
||||||
tol) &&
|
tol) &&
|
||||||
|
|
|
@ -31,7 +31,8 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams {
|
||||||
|
|
||||||
/// Default constructor for serialization only
|
/// Default constructor for serialization only
|
||||||
PreintegrationParams()
|
PreintegrationParams()
|
||||||
: accelerometerCovariance(I_3x3),
|
: PreintegratedRotationParams(),
|
||||||
|
accelerometerCovariance(I_3x3),
|
||||||
integrationCovariance(I_3x3),
|
integrationCovariance(I_3x3),
|
||||||
use2ndOrderCoriolis(false),
|
use2ndOrderCoriolis(false),
|
||||||
n_gravity(0, 0, -1) {}
|
n_gravity(0, 0, -1) {}
|
||||||
|
@ -39,7 +40,8 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams {
|
||||||
/// The Params constructor insists on getting the navigation frame gravity vector
|
/// The Params constructor insists on getting the navigation frame gravity vector
|
||||||
/// For convenience, two commonly used conventions are provided by named constructors below
|
/// For convenience, two commonly used conventions are provided by named constructors below
|
||||||
PreintegrationParams(const Vector3& n_gravity)
|
PreintegrationParams(const Vector3& n_gravity)
|
||||||
: accelerometerCovariance(I_3x3),
|
: PreintegratedRotationParams(),
|
||||||
|
accelerometerCovariance(I_3x3),
|
||||||
integrationCovariance(I_3x3),
|
integrationCovariance(I_3x3),
|
||||||
use2ndOrderCoriolis(false),
|
use2ndOrderCoriolis(false),
|
||||||
n_gravity(n_gravity) {}
|
n_gravity(n_gravity) {}
|
||||||
|
@ -54,8 +56,8 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams {
|
||||||
return boost::shared_ptr<PreintegrationParams>(new PreintegrationParams(Vector3(0, 0, -g)));
|
return boost::shared_ptr<PreintegrationParams>(new PreintegrationParams(Vector3(0, 0, -g)));
|
||||||
}
|
}
|
||||||
|
|
||||||
void print(const std::string& s) const;
|
void print(const std::string& s="") const;
|
||||||
bool equals(const PreintegratedRotation::Params& other, double tol) const;
|
bool equals(const PreintegratedRotationParams& other, double tol) const;
|
||||||
|
|
||||||
void setAccelerometerCovariance(const Matrix3& cov) { accelerometerCovariance = cov; }
|
void setAccelerometerCovariance(const Matrix3& cov) { accelerometerCovariance = cov; }
|
||||||
void setIntegrationCovariance(const Matrix3& cov) { integrationCovariance = cov; }
|
void setIntegrationCovariance(const Matrix3& cov) { integrationCovariance = cov; }
|
||||||
|
@ -73,10 +75,9 @@ protected:
|
||||||
template<class ARCHIVE>
|
template<class ARCHIVE>
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
namespace bs = ::boost::serialization;
|
namespace bs = ::boost::serialization;
|
||||||
ar & boost::serialization::make_nvp("PreintegratedRotation_Params",
|
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotationParams);
|
||||||
boost::serialization::base_object<PreintegratedRotationParams>(*this));
|
ar & BOOST_SERIALIZATION_NVP(accelerometerCovariance);
|
||||||
ar & bs::make_nvp("accelerometerCovariance", bs::make_array(accelerometerCovariance.data(), accelerometerCovariance.size()));
|
ar & BOOST_SERIALIZATION_NVP(integrationCovariance);
|
||||||
ar & bs::make_nvp("integrationCovariance", bs::make_array(integrationCovariance.data(), integrationCovariance.size()));
|
|
||||||
ar & BOOST_SERIALIZATION_NVP(use2ndOrderCoriolis);
|
ar & BOOST_SERIALIZATION_NVP(use2ndOrderCoriolis);
|
||||||
ar & BOOST_SERIALIZATION_NVP(n_gravity);
|
ar & BOOST_SERIALIZATION_NVP(n_gravity);
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,7 +68,7 @@ Vector9 TangentPreintegration::UpdatePreintegrated(const Vector3& a_body,
|
||||||
Matrix3 w_tangent_H_theta, invH;
|
Matrix3 w_tangent_H_theta, invH;
|
||||||
const Vector3 w_tangent = // angular velocity mapped back to tangent space
|
const Vector3 w_tangent = // angular velocity mapped back to tangent space
|
||||||
local.applyInvDexp(w_body, A ? &w_tangent_H_theta : 0, C ? &invH : 0);
|
local.applyInvDexp(w_body, A ? &w_tangent_H_theta : 0, C ? &invH : 0);
|
||||||
const Rot3 R(local.expmap());
|
const Rot3 R(local.expmap()); // nRb: rotation of body in nav frame
|
||||||
const Vector3 a_nav = R * a_body;
|
const Vector3 a_nav = R * a_body;
|
||||||
const double dt22 = 0.5 * dt * dt;
|
const double dt22 = 0.5 * dt * dt;
|
||||||
|
|
||||||
|
@ -110,7 +110,7 @@ void TangentPreintegration::update(const Vector3& measuredAcc,
|
||||||
Vector3 acc = biasHat_.correctAccelerometer(measuredAcc);
|
Vector3 acc = biasHat_.correctAccelerometer(measuredAcc);
|
||||||
Vector3 omega = biasHat_.correctGyroscope(measuredOmega);
|
Vector3 omega = biasHat_.correctGyroscope(measuredOmega);
|
||||||
|
|
||||||
// Possibly correct for sensor pose
|
// Possibly correct for sensor pose by converting to body frame
|
||||||
Matrix3 D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega;
|
Matrix3 D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega;
|
||||||
if (p().body_P_sensor)
|
if (p().body_P_sensor)
|
||||||
boost::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega,
|
boost::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega,
|
||||||
|
|
|
@ -132,12 +132,10 @@ private:
|
||||||
template<class ARCHIVE>
|
template<class ARCHIVE>
|
||||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||||
namespace bs = ::boost::serialization;
|
namespace bs = ::boost::serialization;
|
||||||
ar & BOOST_SERIALIZATION_NVP(p_);
|
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationBase);
|
||||||
ar & BOOST_SERIALIZATION_NVP(biasHat_);
|
ar & BOOST_SERIALIZATION_NVP(preintegrated_);
|
||||||
ar & BOOST_SERIALIZATION_NVP(deltaTij_);
|
ar & BOOST_SERIALIZATION_NVP(preintegrated_H_biasAcc_);
|
||||||
ar & bs::make_nvp("preintegrated_", bs::make_array(preintegrated_.data(), preintegrated_.size()));
|
ar & BOOST_SERIALIZATION_NVP(preintegrated_H_biasOmega_);
|
||||||
ar & bs::make_nvp("preintegrated_H_biasAcc_", bs::make_array(preintegrated_H_biasAcc_.data(), preintegrated_H_biasAcc_.size()));
|
|
||||||
ar & bs::make_nvp("preintegrated_H_biasOmega_", bs::make_array(preintegrated_H_biasOmega_.data(), preintegrated_H_biasOmega_.size()));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
|
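The serialize() rewrites in these hunks all converge on the same pattern: a derived class serializes its base via BOOST_SERIALIZATION_BASE_OBJECT_NVP and only its own members via BOOST_SERIALIZATION_NVP, instead of flattening Eigen members with make_array. A generic sketch of that pattern on hypothetical types (not GTSAM code):

#include <boost/serialization/access.hpp>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/nvp.hpp>

class BaseParams {
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) {
    ar & BOOST_SERIALIZATION_NVP(sigma_);
  }
 protected:
  double sigma_ = 1.0;
};

class DerivedParams : public BaseParams {
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) {
    // Serialize the base subobject first, then members owned by this class.
    ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseParams);
    ar & BOOST_SERIALIZATION_NVP(bias_);
  }
  double bias_ = 0.0;
};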
@ -16,15 +16,19 @@
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
* @author Richard Roberts
|
* @author Richard Roberts
|
||||||
* @author Stephen Williams
|
* @author Stephen Williams
|
||||||
|
* @author Varun Agrawal
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <gtsam/navigation/ImuFactor.h>
|
|
||||||
#include <gtsam/base/serializationTestHelpers.h>
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
|
#include <gtsam/base/serializationTestHelpers.h>
|
||||||
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
|
#include <gtsam/navigation/ImuFactor.h>
|
||||||
|
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
using namespace gtsam::serializationTestHelpers;
|
||||||
|
|
||||||
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained,
|
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained,
|
||||||
"gtsam_noiseModel_Constrained");
|
"gtsam_noiseModel_Constrained");
|
||||||
|
@ -38,23 +42,23 @@ BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,
|
||||||
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
|
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
|
||||||
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
|
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
|
||||||
|
|
||||||
TEST(ImuFactor, serialization) {
|
template <typename P>
|
||||||
using namespace gtsam::serializationTestHelpers;
|
P getPreintegratedMeasurements() {
|
||||||
|
|
||||||
// Create default parameters with Z-down and the noise parameters set below
|
// Create default parameters with Z-down and the noise parameters set below
|
||||||
auto p = PreintegrationParams::MakeSharedD(9.81);
|
auto p = P::Params::MakeSharedD(9.81);
|
||||||
p->body_P_sensor = Pose3(Rot3::Ypr(0, 0, M_PI), Point3(0,0,0));
|
p->body_P_sensor = Pose3(Rot3::Ypr(0, 0, M_PI), Point3(0, 0, 0));
|
||||||
p->accelerometerCovariance = 1e-7 * I_3x3;
|
p->accelerometerCovariance = 1e-7 * I_3x3;
|
||||||
p->gyroscopeCovariance = 1e-8 * I_3x3;
|
p->gyroscopeCovariance = 1e-8 * I_3x3;
|
||||||
p->integrationCovariance = 1e-9 * I_3x3;
|
p->integrationCovariance = 1e-9 * I_3x3;
|
||||||
|
|
||||||
const double deltaT = 0.005;
|
const double deltaT = 0.005;
|
||||||
const imuBias::ConstantBias priorBias(
|
|
||||||
Vector3(0, 0, 0), Vector3(0, 0.01, 0)); // Biases (acc, rot)
|
|
||||||
|
|
||||||
PreintegratedImuMeasurements pim(p, priorBias);
|
// Biases (acc, rot)
|
||||||
|
const imuBias::ConstantBias priorBias(Vector3(0, 0, 0), Vector3(0, 0.01, 0));
|
||||||
|
|
||||||
// measurements are needed for non-inf noise model, otherwise will throw err
|
P pim(p, priorBias);
|
||||||
|
|
||||||
|
// measurements are needed for non-inf noise model, otherwise an error is thrown
|
||||||
// when deserializing
|
// when deserializing
|
||||||
const Vector3 measuredOmega(0, 0.01, 0);
|
const Vector3 measuredOmega(0, 0.01, 0);
|
||||||
const Vector3 measuredAcc(0, 0, -9.81);
|
const Vector3 measuredAcc(0, 0, -9.81);
|
||||||
|
@ -62,6 +66,16 @@ TEST(ImuFactor, serialization) {
|
||||||
for (int j = 0; j < 200; ++j)
|
for (int j = 0; j < 200; ++j)
|
||||||
pim.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
|
pim.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
|
||||||
|
|
||||||
|
return pim;
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(ImuFactor, serialization) {
|
||||||
|
auto pim = getPreintegratedMeasurements<PreintegratedImuMeasurements>();
|
||||||
|
|
||||||
|
EXPECT(equalsObj<PreintegratedImuMeasurements>(pim));
|
||||||
|
EXPECT(equalsXML<PreintegratedImuMeasurements>(pim));
|
||||||
|
EXPECT(equalsBinary<PreintegratedImuMeasurements>(pim));
|
||||||
|
|
||||||
ImuFactor factor(1, 2, 3, 4, 5, pim);
|
ImuFactor factor(1, 2, 3, 4, 5, pim);
|
||||||
|
|
||||||
EXPECT(equalsObj<ImuFactor>(factor));
|
EXPECT(equalsObj<ImuFactor>(factor));
|
||||||
|
@ -69,6 +83,30 @@ TEST(ImuFactor, serialization) {
|
||||||
EXPECT(equalsBinary<ImuFactor>(factor));
|
EXPECT(equalsBinary<ImuFactor>(factor));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(ImuFactor2, serialization) {
|
||||||
|
auto pim = getPreintegratedMeasurements<PreintegratedImuMeasurements>();
|
||||||
|
|
||||||
|
ImuFactor2 factor(1, 2, 3, pim);
|
||||||
|
|
||||||
|
EXPECT(equalsObj<ImuFactor2>(factor));
|
||||||
|
EXPECT(equalsXML<ImuFactor2>(factor));
|
||||||
|
EXPECT(equalsBinary<ImuFactor2>(factor));
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(CombinedImuFactor, Serialization) {
|
||||||
|
auto pim = getPreintegratedMeasurements<PreintegratedCombinedMeasurements>();
|
||||||
|
|
||||||
|
EXPECT(equalsObj<PreintegratedCombinedMeasurements>(pim));
|
||||||
|
EXPECT(equalsXML<PreintegratedCombinedMeasurements>(pim));
|
||||||
|
EXPECT(equalsBinary<PreintegratedCombinedMeasurements>(pim));
|
||||||
|
|
||||||
|
const CombinedImuFactor factor(1, 2, 3, 4, 5, 6, pim);
|
||||||
|
|
||||||
|
EXPECT(equalsObj<CombinedImuFactor>(factor));
|
||||||
|
EXPECT(equalsXML<CombinedImuFactor>(factor));
|
||||||
|
EXPECT(equalsBinary<CombinedImuFactor>(factor));
|
||||||
|
}
|
||||||
|
|
||||||
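The equalsObj/equalsXML/equalsBinary helpers used in these tests come from gtsam/base/serializationTestHelpers.h; conceptually each one round-trips the object through the corresponding Boost archive and compares the result with equals(). A simplified sketch of that idea, assuming T is default-constructible and provides equals() (this is not the actual helper implementation):

#include <sstream>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

template <typename T>
bool roundTripEquals(const T& input) {
  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << input;  // serialize
  }
  T output;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> output;  // deserialize into a fresh object
  }
  return input.equals(output);
}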
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
int main() {
|
int main() {
|
||||||
TestResult tr;
|
TestResult tr;
|
||||||
|
|
|
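For readers skimming the hunk above: the integer keys 1–5 and 1–3 are placeholder keys used only for the serialization round-trip; ImuFactor connects pose_i, vel_i, pose_j, vel_j, bias keys, while ImuFactor2 connects two NavState keys plus a bias key. A minimal sketch with symbolic keys (the symbol letters are illustrative and not taken from this test):

// Sketch: key layouts for the two IMU factors (symbol choices illustrative).
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/ImuFactor.h>

using namespace gtsam;
using symbol_shorthand::B;  // imuBias::ConstantBias
using symbol_shorthand::S;  // NavState
using symbol_shorthand::V;  // velocity (Vector3)
using symbol_shorthand::X;  // Pose3

void buildImuFactors(const PreintegratedImuMeasurements &pim) {
  // ImuFactor: pose_i, vel_i, pose_j, vel_j, bias
  ImuFactor f1(X(0), V(0), X(1), V(1), B(0), pim);
  // ImuFactor2: navstate_i, navstate_j, bias
  ImuFactor2 f2(S(0), S(1), B(0), pim);
}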
@ -57,7 +57,7 @@ class AdaptAutoDiff {
     if (H1 || H2) {
       // Get derivatives with AutoDiff
       const double* parameters[] = {v1.data(), v2.data()};
-      double rowMajor1[M * N1], rowMajor2[M * N2];  // on the stack
+      double rowMajor1[M * N1] = {}, rowMajor2[M * N2] = {};  // on the stack
       double* jacobians[] = {rowMajor1, rowMajor2};
       success = AutoDiff<FUNCTOR, double, N1, N2>::Differentiate(
           f, parameters, M, result.data(), jacobians);
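The added `= {}` initializers value-initialize the scratch buffers, so the row-major Jacobian storage starts zeroed instead of holding indeterminate stack values. A standalone sketch of the same C++ idiom (array names and sizes here are hypothetical, not from AdaptAutoDiff):

#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t M = 2, N1 = 3;
  double uninitialized[M * N1];   // indeterminate contents; reading them is undefined behavior
  double zeroed[M * N1] = {};     // aggregate value-initialization: every element is 0.0
  (void)uninitialized;            // never read the uninitialized buffer
  for (double v : zeroed) std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}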
@ -0,0 +1,148 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file FunctorizedFactor.h
 * @date May 31, 2020
 * @author Varun Agrawal
 **/

#pragma once

#include <gtsam/base/Testable.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

#include <cmath>

namespace gtsam {

/**
 * Factor which evaluates the provided unary functor and uses the result to
 * compute the error with respect to the provided measurement.
 *
 * Template parameters are
 * @param R: The return type of the functor after evaluation.
 * @param T: The argument type for the functor.
 *
 * Example:
 *   Key key = Symbol('X', 0);
 *   auto model = noiseModel::Isotropic::Sigma(9, 1);
 *
 *   /// Functor that takes a matrix and multiplies every element by m
 *   class MultiplyFunctor {
 *     double m_;  ///< simple multiplier
 *    public:
 *     MultiplyFunctor(double m) : m_(m) {}
 *     Matrix operator()(const Matrix &X,
 *                       OptionalJacobian<-1, -1> H = boost::none) const {
 *       if (H)
 *         *H = m_ * Matrix::Identity(X.rows()*X.cols(), X.rows()*X.cols());
 *       return m_ * X;
 *     }
 *   };
 *
 *   Matrix measurement = Matrix::Identity(3, 3);
 *   double multiplier = 2.0;
 *
 *   FunctorizedFactor<Matrix, Matrix> factor(keyX, measurement, model,
 *                                            MultiplyFunctor(multiplier));
 */
template <typename R, typename T>
class GTSAM_EXPORT FunctorizedFactor : public NoiseModelFactor1<T> {
 private:
  using Base = NoiseModelFactor1<T>;

  R measured_;  ///< value that is compared with functor return value
  SharedNoiseModel noiseModel_;  ///< noise model
  std::function<R(T, boost::optional<Matrix &>)> func_;  ///< functor instance

 public:
  /** default constructor - only use for serialization */
  FunctorizedFactor() {}

  /** Construct with given x and the parameters of the basis
   *
   * @param key: Factor key
   * @param z: Measurement object of same type as that returned by functor
   * @param model: Noise model
   * @param func: The instance of the functor object
   */
  FunctorizedFactor(Key key, const R &z, const SharedNoiseModel &model,
                    const std::function<R(T, boost::optional<Matrix &>)> func)
      : Base(model, key), measured_(z), noiseModel_(model), func_(func) {}

  virtual ~FunctorizedFactor() {}

  /// @return a deep copy of this factor
  virtual NonlinearFactor::shared_ptr clone() const {
    return boost::static_pointer_cast<NonlinearFactor>(
        NonlinearFactor::shared_ptr(new FunctorizedFactor<R, T>(*this)));
  }

  Vector evaluateError(const T &params,
                       boost::optional<Matrix &> H = boost::none) const {
    R x = func_(params, H);
    Vector error = traits<R>::Local(measured_, x);
    return error;
  }

  /// @name Testable
  /// @{
  void print(const std::string &s = "",
             const KeyFormatter &keyFormatter = DefaultKeyFormatter) const {
    Base::print(s, keyFormatter);
    std::cout << s << (s != "" ? " " : "") << "FunctorizedFactor("
              << keyFormatter(this->key()) << ")" << std::endl;
    traits<R>::Print(measured_, " measurement: ");
    std::cout << " noise model sigmas: " << noiseModel_->sigmas().transpose()
              << std::endl;
  }

  virtual bool equals(const NonlinearFactor &other, double tol = 1e-9) const {
    const FunctorizedFactor<R, T> *e =
        dynamic_cast<const FunctorizedFactor<R, T> *>(&other);
    return e && Base::equals(other, tol) &&
           traits<R>::Equals(this->measured_, e->measured_, tol);
  }
  /// @}

 private:
  /** Serialization function */
  friend class boost::serialization::access;
  template <class ARCHIVE>
  void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
    ar &boost::serialization::make_nvp(
        "NoiseModelFactor1", boost::serialization::base_object<Base>(*this));
    ar &BOOST_SERIALIZATION_NVP(measured_);
    ar &BOOST_SERIALIZATION_NVP(func_);
  }
};

/// traits
template <typename R, typename T>
struct traits<FunctorizedFactor<R, T>>
    : public Testable<FunctorizedFactor<R, T>> {};

/**
 * Helper function to create a functorized factor.
 *
 * Uses function template deduction to identify the return type and functor
 * type, so the template list only needs the functor argument type.
 */
template <typename T, typename R, typename FUNC>
FunctorizedFactor<R, T> MakeFunctorizedFactor(Key key, const R &z,
                                              const SharedNoiseModel &model,
                                              const FUNC func) {
  return FunctorizedFactor<R, T>(key, z, model, func);
}

}  // namespace gtsam
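A minimal usage sketch of the new factor in a graph follows; it is illustrative only. The functor mirrors the one in the header documentation, and the key name, initial value, and optimizer choice are assumptions, not part of this change:

#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

#include <iostream>

using namespace gtsam;

// Toy functor, same shape as the one in the header documentation.
class MultiplyFunctor {
  double m_;
 public:
  explicit MultiplyFunctor(double m) : m_(m) {}
  Matrix operator()(const Matrix &X,
                    OptionalJacobian<-1, -1> H = boost::none) const {
    if (H) *H = m_ * Matrix::Identity(X.rows() * X.cols(), X.rows() * X.cols());
    return m_ * X;
  }
};

int main() {
  const Key keyX = Symbol('X', 0);
  auto model = noiseModel::Isotropic::Sigma(9, 1);
  const Matrix measurement = 2.0 * Matrix::Identity(3, 3);

  NonlinearFactorGraph graph;
  // Only the argument type (Matrix) is spelled out; R and FUNC are deduced.
  graph.add(MakeFunctorizedFactor<Matrix>(keyX, measurement, model,
                                          MultiplyFunctor(2.0)));

  Values initial;
  initial.insert<Matrix>(keyX, Matrix::Zero(3, 3));

  const Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
  std::cout << result.at<Matrix>(keyX) << std::endl;  // expect roughly the identity
  return 0;
}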
@ -121,7 +121,7 @@ double NoiseModelFactor::error(const Values& c) const {
     const Vector b = unwhitenedError(c);
     check(noiseModel_, b.size());
     if (noiseModel_)
-      return 0.5 * noiseModel_->distance(b);
+      return noiseModel_->loss(noiseModel_->squaredMahalanobisDistance(b));
     else
       return 0.5 * b.squaredNorm();
   } else {
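The change above routes the factor error through the noise model's loss function instead of hard-coding half the (squared Mahalanobis) distance, which is what lets robust noise models apply their m-estimator. A hedged sketch of the difference, assuming the gtsam::noiseModel API in develop at this point (Robust, Huber, loss, squaredMahalanobisDistance):

#include <gtsam/base/Vector.h>
#include <gtsam/linear/NoiseModel.h>

#include <iostream>

int main() {
  using namespace gtsam;
  auto gaussian = noiseModel::Isotropic::Sigma(3, 2.0);
  auto robust = noiseModel::Robust::Create(
      noiseModel::mEstimator::Huber::Create(1.345), gaussian);

  const Vector3 b(1.0, 2.0, 3.0);  // some unwhitened residual

  // Plain Gaussian model: loss(d2) reduces to 0.5 * d2, so the factor error
  // matches the previous 0.5 * distance(b) value.
  const double d2 = gaussian->squaredMahalanobisDistance(b);
  std::cout << gaussian->loss(d2) << " == " << 0.5 * d2 << "\n";

  // Robust wrapper: loss applies the Huber rho instead, down-weighting outliers.
  std::cout << robust->loss(robust->squaredMahalanobisDistance(b)) << "\n";
  return 0;
}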
@ -168,7 +168,11 @@ Camera camera(Pose3(Rot3().retract(Vector3(0.1, 0.2, 0.3)), Point3(0, 5, 0)),
 Point3 point(10, 0, -5);  // negative Z-axis convention of Snavely!
 Vector9 P = Camera().localCoordinates(camera);
 Vector3 X = point;
+#ifdef GTSAM_POSE3_EXPMAP
+Vector2 expectedMeasurement(1.3124675, 1.2057287);
+#else
 Vector2 expectedMeasurement(1.2431567, 1.2525694);
+#endif
 Matrix E1 = numericalDerivative21<Vector2, Vector9, Vector3>(adapted, P, X);
 Matrix E2 = numericalDerivative22<Vector2, Vector9, Vector3>(adapted, P, X);
 }

@ -177,7 +181,11 @@ Matrix E2 = numericalDerivative22<Vector2, Vector9, Vector3>(adapted, P, X);
 // Check that Local worked as expected
 TEST(AdaptAutoDiff, Local) {
   using namespace example;
+#ifdef GTSAM_POSE3_EXPMAP
+  Vector9 expectedP = (Vector9() << 0.1, 0.2, 0.3, 0.7583528428, 4.9582357859, -0.224941471539, 1, 0, 0).finished();
+#else
   Vector9 expectedP = (Vector9() << 0.1, 0.2, 0.3, 0, 5, 0, 1, 0, 0).finished();
+#endif
   EXPECT(equal_with_abs_tol(expectedP, P));
   Vector3 expectedX(10, 0, -5);  // negative Z-axis convention of Snavely!
   EXPECT(equal_with_abs_tol(expectedX, X));
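The #ifdef branches above exist because the expected numbers depend on which Pose3 retraction the build selects: with GTSAM_POSE3_EXPMAP the chart is the full SE(3) exponential map, otherwise a first-order approximation that decouples rotation and translation, so localCoordinates of the same camera pose yields a different translation part. A small sketch of that difference (for illustration only; no specific values are asserted):

#include <gtsam/geometry/Pose3.h>

#include <iostream>

int main() {
  using namespace gtsam;
  const Pose3 origin;  // identity
  const Pose3 pose(Rot3().retract(Vector3(0.1, 0.2, 0.3)), Point3(0, 5, 0));

  // Full SE(3) logarithm map (what the EXPMAP default uses under the hood).
  const Vector6 xiLog = Pose3::Logmap(origin.between(pose));

  // Whatever chart the build options selected (EXPMAP or first-order).
  const Vector6 xiLocal = origin.localCoordinates(pose);

  std::cout << "Logmap:           " << xiLog.transpose() << "\n";
  std::cout << "localCoordinates: " << xiLocal.transpose() << "\n";
  return 0;
}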
@ -0,0 +1,185 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testFunctorizedFactor.cpp
 * @date May 31, 2020
 * @author Varun Agrawal
 * @brief unit tests for FunctorizedFactor class
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/factorTesting.h>

using namespace std;
using namespace gtsam;

Key key = Symbol('X', 0);
auto model = noiseModel::Isotropic::Sigma(9, 1);

/// Functor that takes a matrix and multiplies every element by m
class MultiplyFunctor {
  double m_;  ///< simple multiplier

 public:
  MultiplyFunctor(double m) : m_(m) {}

  Matrix operator()(const Matrix &X,
                    OptionalJacobian<-1, -1> H = boost::none) const {
    if (H) *H = m_ * Matrix::Identity(X.rows() * X.cols(), X.rows() * X.cols());
    return m_ * X;
  }
};

/* ************************************************************************* */
// Test identity operation for FunctorizedFactor.
TEST(FunctorizedFactor, Identity) {
  Matrix X = Matrix::Identity(3, 3), measurement = Matrix::Identity(3, 3);

  double multiplier = 1.0;
  auto functor = MultiplyFunctor(multiplier);
  auto factor = MakeFunctorizedFactor<Matrix>(key, measurement, model, functor);

  Vector error = factor.evaluateError(X);

  EXPECT(assert_equal(Vector::Zero(9), error, 1e-9));
}

/* ************************************************************************* */
// Test FunctorizedFactor with multiplier value of 2.
TEST(FunctorizedFactor, Multiply2) {
  double multiplier = 2.0;
  Matrix X = Matrix::Identity(3, 3);
  Matrix measurement = multiplier * Matrix::Identity(3, 3);

  auto factor = MakeFunctorizedFactor<Matrix>(key, measurement, model,
                                              MultiplyFunctor(multiplier));

  Vector error = factor.evaluateError(X);

  EXPECT(assert_equal(Vector::Zero(9), error, 1e-9));
}

/* ************************************************************************* */
// Test equality function for FunctorizedFactor.
TEST(FunctorizedFactor, Equality) {
  Matrix measurement = Matrix::Identity(2, 2);

  double multiplier = 2.0;

  auto factor1 = MakeFunctorizedFactor<Matrix>(key, measurement, model,
                                               MultiplyFunctor(multiplier));
  auto factor2 = MakeFunctorizedFactor<Matrix>(key, measurement, model,
                                               MultiplyFunctor(multiplier));

  EXPECT(factor1.equals(factor2));
}

/* ************************************************************************* */
// Test Jacobians of FunctorizedFactor.
TEST(FunctorizedFactor, Jacobians) {
  Matrix X = Matrix::Identity(3, 3);
  Matrix actualH;

  double multiplier = 2.0;

  auto factor =
      MakeFunctorizedFactor<Matrix>(key, X, model, MultiplyFunctor(multiplier));

  Values values;
  values.insert<Matrix>(key, X);

  // Check Jacobians
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
// Test print result of FunctorizedFactor.
TEST(FunctorizedFactor, Print) {
  Matrix X = Matrix::Identity(2, 2);

  double multiplier = 2.0;

  auto factor =
      MakeFunctorizedFactor<Matrix>(key, X, model, MultiplyFunctor(multiplier));

  // redirect output to buffer so we can compare
  stringstream buffer;
  streambuf *old = cout.rdbuf(buffer.rdbuf());

  factor.print();

  // get output string and reset stdout
  string actual = buffer.str();
  cout.rdbuf(old);

  string expected =
      " keys = { X0 }\n"
      " noise model: unit (9) \n"
      "FunctorizedFactor(X0)\n"
      " measurement: [\n"
      " 1, 0;\n"
      " 0, 1\n"
      "]\n"
      " noise model sigmas: 1 1 1 1 1 1 1 1 1\n";

  CHECK_EQUAL(expected, actual);
}

/* ************************************************************************* */
// Test FunctorizedFactor using a std::function type.
TEST(FunctorizedFactor, Functional) {
  double multiplier = 2.0;
  Matrix X = Matrix::Identity(3, 3);
  Matrix measurement = multiplier * Matrix::Identity(3, 3);

  std::function<Matrix(Matrix, boost::optional<Matrix &>)> functional =
      MultiplyFunctor(multiplier);
  auto factor =
      MakeFunctorizedFactor<Matrix>(key, measurement, model, functional);

  Vector error = factor.evaluateError(X);

  EXPECT(assert_equal(Vector::Zero(9), error, 1e-9));
}

/* ************************************************************************* */
// Test FunctorizedFactor with a lambda function.
TEST(FunctorizedFactor, Lambda) {
  double multiplier = 2.0;
  Matrix X = Matrix::Identity(3, 3);
  Matrix measurement = multiplier * Matrix::Identity(3, 3);

  auto lambda = [multiplier](const Matrix &X,
                             OptionalJacobian<-1, -1> H = boost::none) {
    if (H)
      *H = multiplier *
           Matrix::Identity(X.rows() * X.cols(), X.rows() * X.cols());
    return multiplier * X;
  };
  // FunctorizedFactor<Matrix> factor(key, measurement, model, lambda);
  auto factor = MakeFunctorizedFactor<Matrix>(key, measurement, model, lambda);

  Vector error = factor.evaluateError(X);

  EXPECT(assert_equal(Vector::Zero(9), error, 1e-9));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@ -459,11 +459,12 @@ TEST(NonlinearOptimizer, RobustMeanCalculation) {
   init.insert(0, 100.0);
   expected.insert(0, 3.33333333);

-  LevenbergMarquardtParams params;
+  DoglegParams params_dl;
+  params_dl.setRelativeErrorTol(1e-10);

   auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
-  auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
+  auto lm_result = LevenbergMarquardtOptimizer(fg, init).optimize();
-  auto dl_result = DoglegOptimizer(fg, init).optimize();
+  auto dl_result = DoglegOptimizer(fg, init, params_dl).optimize();

   EXPECT(assert_equal(expected, gn_result, tol));
   EXPECT(assert_equal(expected, lm_result, tol));
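For context on the parameter change above: DoglegParams inherits the generic NonlinearOptimizerParams stopping criteria, so tightening the relative error tolerance keeps the Dogleg run from declaring convergence early on this robust-cost mean problem. A hedged sketch of the pattern (the graph and initial values are placeholders, not the test's):

#include <gtsam/nonlinear/DoglegOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

Values solveWithTightDogleg(const NonlinearFactorGraph &fg, const Values &init) {
  DoglegParams params_dl;
  params_dl.setRelativeErrorTol(1e-10);  // default tolerance is much looser (around 1e-5)
  return DoglegOptimizer(fg, init, params_dl).optimize();
}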