Merge branch 'develop' into imu-examples
commit 5da2108cd4
@@ -34,7 +34,7 @@ cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \

 make -j$(nproc) install

-cd $CURRDIR/../gtsam_install/cython
+cd cython

 sudo $PYTHON setup.py install

@@ -1,6 +1,5 @@
 language: cpp
 cache: ccache
-sudo: required
 dist: xenial

 addons:
@@ -33,7 +32,7 @@ stages:

 env:
 global:
-- MAKEFLAGS="-j2"
+- MAKEFLAGS="-j3"
 - CCACHE_SLOPPINESS=pch_defines,time_macros

 # Compile stage without building examples/tests to populate the caches.
@@ -10,7 +10,7 @@ endif()
 # Set the version number for the library
 set (GTSAM_VERSION_MAJOR 4)
 set (GTSAM_VERSION_MINOR 0)
-set (GTSAM_VERSION_PATCH 2)
+set (GTSAM_VERSION_PATCH 3)
 math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
 set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")

@@ -68,8 +68,8 @@ if(GTSAM_UNSTABLE_AVAILABLE)
 endif()
 option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
 option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
-option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF)
-option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
+option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
+option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
 option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
 option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
 option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
@@ -454,12 +454,11 @@ endif()
 if (GTSAM_INSTALL_CYTHON_TOOLBOX)
 set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
 # Set up cache options
-set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
-if(NOT GTSAM_CYTHON_INSTALL_PATH)
-set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
-endif()
+# Cython install path appended with Build type (e.g. cython, cythonDebug, etc).
+# This does not override custom values set from the command line
+set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython<GTSAM_BUILD_TAG>")
 set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
-add_subdirectory(cython)
+add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH})
 else()
 set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
 endif()
@@ -537,54 +536,54 @@ endif()

 print_build_options_for_target(gtsam)

 message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")

 if(GTSAM_USE_TBB)
 message(STATUS " Use Intel TBB : Yes")
 elseif(TBB_FOUND)
 message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
 else()
 message(STATUS " Use Intel TBB : TBB not found")
 endif()
 if(GTSAM_USE_EIGEN_MKL)
 message(STATUS " Eigen will use MKL : Yes")
 elseif(MKL_FOUND)
 message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
 else()
 message(STATUS " Eigen will use MKL : MKL not found")
 endif()
 if(GTSAM_USE_EIGEN_MKL_OPENMP)
 message(STATUS " Eigen will use MKL and OpenMP : Yes")
 elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
 message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
 elseif(OPENMP_FOUND AND NOT MKL_FOUND)
 message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
 elseif(OPENMP_FOUND)
 message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
 else()
 message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
 endif()
 message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")

 if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
 message(STATUS " Cheirality exceptions enabled : YES")
 else()
 message(STATUS " Cheirality exceptions enabled : NO")
 endif()

 if(NOT MSVC AND NOT XCODE_VERSION)
 if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
 message(STATUS " Build with ccache : Yes")
 elseif(CCACHE_FOUND)
 message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
 else()
 message(STATUS " Build with ccache : No")
 endif()
 endif()

 message(STATUS "Packaging flags ")
 message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
 message(STATUS " CPack Generator : ${CPACK_GENERATOR}")

 message(STATUS "GTSAM flags ")
 print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
@@ -595,15 +594,19 @@ print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 al
 print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
 print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
 print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
 print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")

 message(STATUS "MATLAB toolbox flags ")
-print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
+print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
+if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
+message(STATUS " MATLAB root : ${MATLAB_ROOT}")
+message(STATUS " MEX binary : ${MEX_COMMAND}")
+endif()

 message(STATUS "Cython toolbox flags ")
 print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
 if(GTSAM_INSTALL_CYTHON_TOOLBOX)
 message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
 endif()
 message(STATUS "===============================================================")

@@ -64,7 +64,7 @@ protected:
 class testGroup##testName##Test : public Test \
 { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, true) {} \
 virtual ~testGroup##testName##Test () {};\
-void run (TestResult& result_);} \
+void run (TestResult& result_) override;} \
 testGroup##testName##Instance; \
 void testGroup##testName##Test::run (TestResult& result_)

@@ -82,7 +82,7 @@ protected:
 class testGroup##testName##Test : public Test \
 { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, false) {} \
 virtual ~testGroup##testName##Test () {};\
-void run (TestResult& result_);} \
+void run (TestResult& result_) override;} \
 testGroup##testName##Instance; \
 void testGroup##testName##Test::run (TestResult& result_)

@@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp
 However, we now also need to be able to evaluate the derivatives of compose and inverse.
 Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`:

-* `r = traits<T>::Compose(p,q,Hq,Hp)`
+* `r = traits<T>::Compose(p,q,Hp,Hq)`
 * `q = traits<T>::Inverse(p,Hp)`
-* `r = traits<T>::Between(p,q,Hq,H2p)`
+* `r = traits<T>::Between(p,q,Hp,Hq)`

 where above the *H* arguments stand for optional Jacobian arguments.
 That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
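The documentation fix above only reorders the Jacobian names so that `Hp` pairs with `p` and `Hq` with `q`. As a reminder of what these functions compute, here is a minimal sketch using the wrapped `Pose3` from the Cython toolbox; it assumes the `gtsam` Python module is importable, the pose values are made up, and the Jacobian-returning overloads themselves remain C++-only:

```python
import gtsam

# Two example poses (hypothetical values; any Lie group type works the same way).
p = gtsam.Pose3(gtsam.Rot3.Ypr(0.1, -0.2, 0.3), gtsam.Point3(1.0, 2.0, 3.0))
q = gtsam.Pose3(gtsam.Rot3.Ypr(-0.1, 0.2, 0.1), gtsam.Point3(4.0, 5.0, 6.0))

# Between(p, q) is Compose(Inverse(p), q); Hp and Hq are the derivatives of
# that expression with respect to p and q.
r = p.between(q)
r_check = p.inverse().compose(q)
print(r.equals(r_check, 1e-9))  # True
```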
@@ -1,5 +1,5 @@
 # version format
-version: 4.0.2-{branch}-build{build}
+version: 4.0.3-{branch}-build{build}

 os: Visual Studio 2019

@@ -1,3 +1,8 @@
+# Set cmake policy to recognize the AppleClang compiler
+# independently from the Clang compiler.
+if(POLICY CMP0025)
+cmake_policy(SET CMP0025 NEW)
+endif()

 # function: list_append_cache(var [new_values ...])
 # Like "list(APPEND ...)" but working for CACHE variables.
@@ -99,8 +104,24 @@ if(MSVC)
 set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
 else()
 # Common to all configurations, next for each configuration:
-# "-fPIC" is to ensure proper code generation for shared libraries
-set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall -fPIC CACHE STRING "(User editable) Private compiler flags for all configurations.")
+if (
+((CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0.0)) OR
+(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+)
+set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday
+endif()
+
+set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON
+-Wall # Enable common warnings
+-fPIC # ensure proper code generation for shared libraries
+$<$<CXX_COMPILER_ID:GNU>:-Wreturn-local-addr -Werror=return-local-addr> # Error: return local address
+$<$<CXX_COMPILER_ID:Clang>:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address
+-Wreturn-type -Werror=return-type # Error on missing return()
+-Wformat -Werror=format-security # Error on wrong printf() arguments
+$<$<COMPILE_LANGUAGE:CXX>:${flag_override_}> # Enforce the use of the override keyword
+#
+CACHE STRING "(User editable) Private compiler flags for all configurations.")
 set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.")
 set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO -g -O3 CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.")
 set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE -O3 CACHE STRING "(User editable) Private compiler flags for Release configuration.")
@@ -41,9 +41,8 @@ execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
 function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
 # Paths for generated files
 get_filename_component(module_name "${interface_header}" NAME_WE)
-set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
+set(generated_files_path "${install_path}")
 wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
-install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
 endfunction()

 function(set_up_required_cython_packages)
@@ -138,6 +137,10 @@ function(cythonize target pyx_file output_lib_we output_dir include_dirs libs in
 target_link_libraries(${target} "${libs}")
 endif()
 add_dependencies(${target} ${target}_pyx2cpp)
+
+if(TARGET ${python_install_target})
+add_dependencies(${python_install_target} ${target})
+endif()
 endfunction()

 # Internal function that wraps a library and compiles the wrapper
@@ -150,9 +153,12 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
 get_filename_component(module_name "${interface_header}" NAME_WE)

 # Wrap module to Cython pyx
-message(STATUS "Cython wrapper generating ${module_name}.pyx")
+message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx")
 set(generated_pyx "${generated_files_path}/${module_name}.pyx")
-file(MAKE_DIRECTORY "${generated_files_path}")
+if(NOT EXISTS ${generated_files_path})
+file(MAKE_DIRECTORY "${generated_files_path}")
+endif()

 add_custom_command(
 OUTPUT ${generated_pyx}
 DEPENDS ${interface_header} wrap
@@ -175,46 +181,6 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
 COMMAND cmake -E remove_directory ${generated_files_path})
 endfunction()

-# Internal function that installs a wrap toolbox
-function(install_cython_wrapped_library interface_header generated_files_path install_path)
-get_filename_component(module_name "${interface_header}" NAME_WE)
-
-# NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
-# here prevents creating the top-level module name directory in the destination.
-# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one
-get_filename_component(location "${install_path}" PATH)
-get_filename_component(name "${install_path}" NAME)
-message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}"
-
-if(GTSAM_BUILD_TYPE_POSTFIXES)
-foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
-string(TOUPPER "${build_type}" build_type_upper)
-if(${build_type_upper} STREQUAL "RELEASE")
-set(build_type_tag "") # Don't create release mode tag on installed directory
-else()
-set(build_type_tag "${build_type}")
-endif()
-
-install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}"
-CONFIGURATIONS "${build_type}"
-PATTERN "build" EXCLUDE
-PATTERN "CMakeFiles" EXCLUDE
-PATTERN "Makefile" EXCLUDE
-PATTERN "*.cmake" EXCLUDE
-PATTERN "*.cpp" EXCLUDE
-PATTERN "*.py" EXCLUDE)
-endforeach()
-else()
-install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
-PATTERN "build" EXCLUDE
-PATTERN "CMakeFiles" EXCLUDE
-PATTERN "Makefile" EXCLUDE
-PATTERN "*.cmake" EXCLUDE
-PATTERN "*.cpp" EXCLUDE
-PATTERN "*.py" EXCLUDE)
-endif()
-endfunction()
-
 # Helper function to install Cython scripts and handle multiple build types where the scripts
 # should be installed to all build type toolboxes
 #
@@ -232,50 +198,7 @@ function(install_cython_scripts source_directory dest_directory patterns)
 foreach(pattern ${patterns})
 list(APPEND patterns_args PATTERN "${pattern}")
 endforeach()
-if(GTSAM_BUILD_TYPE_POSTFIXES)
-foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
-string(TOUPPER "${build_type}" build_type_upper)
-if(${build_type_upper} STREQUAL "RELEASE")
-set(build_type_tag "") # Don't create release mode tag on installed directory
-else()
-set(build_type_tag "${build_type}")
-endif()
-# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
-get_filename_component(location "${dest_directory}" PATH)
-get_filename_component(name "${dest_directory}" NAME)
-install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
+file(COPY "${source_directory}" DESTINATION "${dest_directory}"
 FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
-endforeach()
-else()
-install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
-endif()
-
 endfunction()

-# Helper function to install specific files and handle multiple build types where the scripts
-# should be installed to all build type toolboxes
-#
-# Arguments:
-# source_files: The source files to be installed.
-# dest_directory: The destination directory to install to.
-function(install_cython_files source_files dest_directory)
-
-if(GTSAM_BUILD_TYPE_POSTFIXES)
-foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
-string(TOUPPER "${build_type}" build_type_upper)
-if(${build_type_upper} STREQUAL "RELEASE")
-set(build_type_tag "") # Don't create release mode tag on installed directory
-else()
-set(build_type_tag "${build_type}")
-endif()
-# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
-get_filename_component(location "${dest_directory}" PATH)
-get_filename_component(name "${dest_directory}" NAME)
-install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
-endforeach()
-else()
-install(FILES "${source_files}" DESTINATION "${dest_directory}")
-endif()
-
-endfunction()

@@ -3,16 +3,32 @@ include(GtsamCythonWrap)

 # Create the cython toolbox for the gtsam library
 if (GTSAM_INSTALL_CYTHON_TOOLBOX)
+# Add the new make target command
+set(python_install_target python-install)
+add_custom_target(${python_install_target}
+COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install
+WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH})
+
 # build and include the eigency version of eigency
 add_subdirectory(gtsam_eigency)
-include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
+include_directories(${GTSAM_EIGENCY_INSTALL_PATH})

 # Fix for error "C1128: number of sections exceeded object file format limit"
 if(MSVC)
 add_compile_options(/bigobj)
 endif()

-# wrap gtsam
+# First set up all the package related files.
+# This also ensures the below wrap operations work correctly.
+set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt")
+
+# Install the custom-generated __init__.py
+# This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable
+configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY)
+configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY)
+configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py)
+
+# Wrap gtsam
 add_custom_target(gtsam_header DEPENDS "../gtsam.h")
 wrap_and_install_library_cython("../gtsam.h" # interface_header
 "" # extra imports
@@ -20,8 +36,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
 gtsam # library to link with
 "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
 )
+add_dependencies(${python_install_target} gtsam gtsam_header)

-# wrap gtsam_unstable
+# Wrap gtsam_unstable
 if(GTSAM_BUILD_UNSTABLE)
 add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
 wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
@@ -30,17 +47,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
 gtsam_unstable # library to link with
 "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
 )
+add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header)
 endif()

-file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
-file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
-
-# Install the custom-generated __init__.py
-# This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
-configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
-configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
-configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
-install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
 # install scripts and tests
 install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
 install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
cython/README.md
@@ -1,43 +1,51 @@
 # Python Wrapper

-This is the Cython/Python wrapper around the GTSAM C++ library.
+This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code.

+## Requirements
+
+- If you want to build the GTSAM python library for a specific python version (eg 3.6),
+use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used.
+- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
+then the environment should be active while building GTSAM.
+- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows:
+
+```bash
+pip install -r <gtsam_folder>/cython/requirements.txt
+```
+
+- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
+named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy.
+
 ## Install

-- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
-- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
-- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
-
-```bash
-pip install -r <gtsam_folder>/cython/requirements.txt
-```
-
-- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
-named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
-
-- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
-The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
-by default: `<your CMAKE_INSTALL_PREFIX>/cython`
-
-- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
-```bash
-export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
-```
-- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
-- (the same command can be used to install into a virtual environment if it is active)
-- note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
-- if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
-Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
+- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `<PROJECT_BINARY_DIR>/cython` in Release mode and `<PROJECT_BINARY_DIR>/cython<CMAKE_BUILD_TYPE>` for other modes.
+- Build GTSAM and the wrapper with `make`.
+- To install, simply run `make python-install`.
+- The same command can be used to install into a virtual environment if it is active.
+- **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory.
+- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.

 ## Unit Tests

 The Cython toolbox also has a small set of unit tests located in the
 test directory. To run them:

 ```bash
-cd <your GTSAM_CYTHON_INSTALL_PATH>
+cd <GTSAM_CYTHON_INSTALL_PATH>
 python -m unittest discover
 ```

+## Utils
+
+TODO
+
+## Examples
+
+TODO
+
 ## Writing Your Own Scripts

@@ -46,79 +54,63 @@ See the tests for examples.
 ### Some Important Notes:

 - Vector/Matrix:
-+ GTSAM expects double-precision floating point vectors and matrices.
-Hence, you should pass numpy matrices with dtype=float, or 'float64'.
-+ Also, GTSAM expects *column-major* matrices, unlike the default storage
-scheme in numpy. Hence, you should pass column-major matrices to gtsam using
+- GTSAM expects double-precision floating point vectors and matrices.
+Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
+- Also, GTSAM expects _column-major_ matrices, unlike the default storage
+scheme in numpy. Hence, you should pass column-major matrices to GTSAM using
 the flag order='F'. And you always get column-major matrices back.
-For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
+For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
-+ Passing row-major matrices of different dtype, e.g. 'int', will also work
+- Passing row-major matrices of different dtype, e.g. `int`, will also work
 as the wrapper converts them to column-major and dtype float for you,
 using numpy.array.astype(float, order='F', copy=False).
 However, this will result a copy if your matrix is not in the expected type
 and storage order.

-- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>_ in Python.
-Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
+- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>\_ in Python.
+Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`

 - Casting from a base class to a derive class must be done explicitly.
-Examples:
-```Python
-noiseBase = factor.noiseModel()
-noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
-```
-
-## Wrapping Your Own Project That Uses GTSAM
-
-- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
-+ so that it can find gtsam Cython header: gtsam/gtsam.pxd
-
-- In your CMakeList.txt
-```cmake
-find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
-set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
-
-# Wrap
-include(GtsamCythonWrap)
-include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
-wrap_and_install_library_cython("your_project_interface.h"
-"from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
-"your_install_path"
-"libraries_to_link_with_the_cython_module"
-"dependencies_which_need_to_be_built_before_the_wrapper"
-)
-#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
-```
+Examples:
+
+```python
+noiseBase = factor.noiseModel()
+noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
+```
+
+## Wrapping Custom GTSAM-based Project
+
+Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).

 ## KNOWN ISSUES

 - Doesn't work with python3 installed from homebrew
 - size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
 - Guess: 64 vs 32b? disutils Compiler flags?
 - Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be casted to FastVector<Key>
 - Upgrading to 0.25 solves the problem
 - Need default constructor and default copy constructor for almost every classes... :(
 - support these constructors by default and declare "delete" for special classes?


 ### TODO

 - [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython.
-- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
+- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?)
 - [ ] inner namespaces ==> inner packages?
 - [ ] Wrap fixed-size Matrices/Vectors?


 ### Completed/Cancelled:

-- [x] Fix Python tests: don't use " import <package> * ": Bad style!!! (18-03-17 19:50)
+- [x] Fix Python tests: don't use " import <package> \* ": Bad style!!! (18-03-17 19:50)
 - [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
 - [x] Wrap unstable @done (18-03-17 15:30)
-- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
+- [x] Unify cython/GTSAM.h and the original GTSAM.h @done (18-03-17 15:30)
 - [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
 - [x] 06-03-17: manage to remove the requirements for default and copy constructors
 - [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
-- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however.
+- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `GTSAM::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however.
 - [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
 - [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
 - [x] CMake install script @done (25-11-16 02:30)
@@ -132,7 +124,7 @@ wrap_and_install_library_cython("your_project_interface.h"
 - [x] Casting from parent and grandparents @done (16-11-16 17:00)
 - [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
 - [x] Support "print obj" @done (16-11-16 17:00)
 - [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
 - [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
 - [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
 - [x] [Nice to have] parse typedef @done (16-09-13 17:19)
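As a concrete illustration of the Vector/Matrix notes in the README above (double precision, column-major storage), here is a minimal sketch; it assumes the wrapped `gtsam` module is on `PYTHONPATH`, and the 2x2 covariance values are made up for the example:

```python
import numpy as np
import gtsam

# GTSAM expects float64, column-major ('F') matrices; building the array this
# way avoids an implicit conversion/copy inside the wrapper.
cov = np.array([[0.01, 0.0],
                [0.0, 0.04]], dtype=float, order='F')
noise = gtsam.noiseModel_Gaussian.Covariance(cov)

# Row-major or integer input also works, but the wrapper converts it with
# numpy.array.astype(float, order='F', copy=False), which may copy.
noise2 = gtsam.noiseModel_Gaussian.Covariance(np.eye(2, dtype=int))
```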
@@ -0,0 +1,56 @@
+"""
+GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
+Atlanta, Georgia 30332-0415
+All Rights Reserved
+
+See LICENSE for the license information
+
+FrobeniusFactor unit tests.
+Author: Frank Dellaert
+"""
+# pylint: disable=no-name-in-module, import-error, invalid-name
+import unittest
+
+import numpy as np
+from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
+                   FrobeniusWormholeFactor, SOn)
+
+id = SO4()
+v1 = np.array([0, 0, 0, 0.1, 0, 0])
+Q1 = SO4.Expmap(v1)
+v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
+Q2 = SO4.Expmap(v2)
+
+
+class TestFrobeniusFactorSO4(unittest.TestCase):
+    """Test FrobeniusFactor factors."""
+
+    def test_frobenius_factor(self):
+        """Test creation of a factor that calculates the Frobenius norm."""
+        factor = FrobeniusFactorSO4(1, 2)
+        actual = factor.evaluateError(Q1, Q2)
+        expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,))
+        np.testing.assert_array_equal(actual, expected)
+
+    def test_frobenius_between_factor(self):
+        """Test creation of a Frobenius BetweenFactor."""
+        factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2))
+        actual = factor.evaluateError(Q1, Q2)
+        expected = np.zeros((16,))
+        np.testing.assert_array_almost_equal(actual, expected)
+
+    def test_frobenius_wormhole_factor(self):
+        """Test creation of a factor that calculates Shonan error."""
+        R1 = SO3.Expmap(v1[3:])
+        R2 = SO3.Expmap(v2[3:])
+        factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
+        I4 = SOn(4)
+        Q1 = I4.retract(v1)
+        Q2 = I4.retract(v2)
+        actual = factor.evaluateError(Q1, Q2)
+        expected = np.zeros((12,))
+        np.testing.assert_array_almost_equal(actual, expected, decimal=4)
+
+
+if __name__ == "__main__":
+    unittest.main()
@@ -17,7 +17,8 @@ import unittest
 import gtsam
 from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
 GaussNewtonParams, LevenbergMarquardtOptimizer,
-LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
+LevenbergMarquardtParams, PCGSolverParameters,
+DummyPreconditionerParameters, NonlinearFactorGraph, Ordering,
 Point2, PriorFactorPoint2, Values)
 from gtsam.utils.test_case import GtsamTestCase

@@ -61,6 +62,16 @@ class TestScenario(GtsamTestCase):
 fg, initial_values, lmParams).optimize()
 self.assertAlmostEqual(0, fg.error(actual2))

+# Levenberg-Marquardt
+lmParams = LevenbergMarquardtParams.CeresDefaults()
+lmParams.setLinearSolverType("ITERATIVE")
+cgParams = PCGSolverParameters()
+cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+lmParams.setIterativeParams(cgParams)
+actual2 = LevenbergMarquardtOptimizer(
+fg, initial_values, lmParams).optimize()
+self.assertAlmostEqual(0, fg.error(actual2))
+
 # Dogleg
 dlParams = DoglegParams()
 dlParams.setOrdering(ordering)
@@ -4,6 +4,12 @@ Author: Jing Wu and Frank Dellaert
 """
 # pylint: disable=invalid-name

+import sys
+if sys.version_info.major >= 3:
+from io import StringIO
+else:
+from cStringIO import StringIO
+
 import unittest
 from datetime import datetime
|
@ -37,11 +43,24 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
self.optimizer = gtsam.GaussNewtonOptimizer(
|
self.optimizer = gtsam.GaussNewtonOptimizer(
|
||||||
graph, initial, self.params)
|
graph, initial, self.params)
|
||||||
|
|
||||||
|
self.lmparams = gtsam.LevenbergMarquardtParams()
|
||||||
|
self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
|
||||||
|
graph, initial, self.lmparams
|
||||||
|
)
|
||||||
|
|
||||||
|
# setup output capture
|
||||||
|
self.capturedOutput = StringIO()
|
||||||
|
sys.stdout = self.capturedOutput
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
"""Reset print capture."""
|
||||||
|
sys.stdout = sys.__stdout__
|
||||||
|
|
||||||
def test_simple_printing(self):
|
def test_simple_printing(self):
|
||||||
"""Test with a simple hook."""
|
"""Test with a simple hook."""
|
||||||
|
|
||||||
# Provide a hook that just prints
|
# Provide a hook that just prints
|
||||||
def hook(_, error: float):
|
def hook(_, error):
|
||||||
print(error)
|
print(error)
|
||||||
|
|
||||||
# Only thing we require from optimizer is an iterate method
|
# Only thing we require from optimizer is an iterate method
|
||||||
|
@ -51,6 +70,16 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
actual = self.optimizer.values()
|
actual = self.optimizer.values()
|
||||||
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
||||||
|
|
||||||
|
def test_lm_simple_printing(self):
|
||||||
|
"""Make sure we are properly terminating LM"""
|
||||||
|
def hook(_, error):
|
||||||
|
print(error)
|
||||||
|
|
||||||
|
gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
|
||||||
|
|
||||||
|
actual = self.lmoptimizer.values()
|
||||||
|
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
||||||
|
|
||||||
@unittest.skip("Not a test we want run every time, as needs comet.ml account")
|
@unittest.skip("Not a test we want run every time, as needs comet.ml account")
|
||||||
def test_comet(self):
|
def test_comet(self):
|
||||||
"""Test with a comet hook."""
|
"""Test with a comet hook."""
|
||||||
|
@ -65,7 +94,7 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
+ str(time.hour)+":"+str(time.minute)+":"+str(time.second))
|
+ str(time.hour)+":"+str(time.minute)+":"+str(time.second))
|
||||||
|
|
||||||
# I want to do some comet thing here
|
# I want to do some comet thing here
|
||||||
def hook(optimizer, error: float):
|
def hook(optimizer, error):
|
||||||
comet.log_metric("Karcher error",
|
comet.log_metric("Karcher error",
|
||||||
error, optimizer.iterations())
|
error, optimizer.iterations())
|
||||||
|
|
||||||
|
|
|
@@ -4,15 +4,11 @@ Author: Jing Wu and Frank Dellaert
 """
 # pylint: disable=invalid-name

-from typing import TypeVar
-
 from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
 import gtsam

-T = TypeVar('T')
-
-def optimize(optimizer: T, check_convergence, hook):
+def optimize(optimizer, check_convergence, hook):
 """ Given an optimizer and a convergence check, iterate until convergence.
 After each iteration, hook(optimizer, error) is called.
 After the function, use values and errors to get the result.
@@ -36,8 +32,8 @@ def optimize(optimizer: T, check_convergence, hook):
 current_error = new_error


-def gtsam_optimize(optimizer: NonlinearOptimizer,
-params: NonlinearOptimizerParams,
+def gtsam_optimize(optimizer,
+params,
 hook):
 """ Given an optimizer and params, iterate until convergence.
 After each iteration, hook(optimizer) is called.
@@ -50,5 +46,7 @@ def gtsam_optimize(optimizer: NonlinearOptimizer,
 def check_convergence(optimizer, current_error, new_error):
 return (optimizer.iterations() >= params.getMaxIterations()) or (
 gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(),
-current_error, new_error))
+current_error, new_error)) or (
+isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound())
 optimize(optimizer, check_convergence, hook)
+return optimizer.values()
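With these changes `gtsam_optimize` also stops Levenberg-Marquardt once lambda exceeds its upper bound and returns the optimized `Values`. A minimal usage sketch on a hypothetical one-factor toy problem (assumes the wrapped `gtsam` module is importable):

```python
import gtsam
from gtsam.utils.logging_optimizer import gtsam_optimize

# Hypothetical toy problem: a single Rot3 prior factor.
graph = gtsam.NonlinearFactorGraph()
prior = gtsam.Rot3.Ypr(0.3, 0.1, -0.2)
noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
graph.add(gtsam.PriorFactorRot3(0, prior, noise))

initial = gtsam.Values()
initial.insert(0, gtsam.Rot3())

params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)

# hook(optimizer, error) is called after every iteration.
def hook(_, error):
    print(error)

result = gtsam_optimize(optimizer, params, hook)  # now returns the Values
print(result.atRot3(0).matrix())
```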
@@ -135,7 +135,8 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
 axes.add_patch(e1)


-def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
+def plot_pose2(fignum, pose, axis_length=0.1, covariance=None,
+axis_labels=('X axis', 'Y axis', 'Z axis')):
 """
 Plot a 2D pose on given figure with given `axis_length`.

@@ -144,6 +145,7 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
 pose (gtsam.Pose2): The pose to be plotted.
 axis_length (float): The length of the camera axes.
 covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
+axis_labels (iterable[string]): List of axis labels to set.
 """
 # get figure object
 fig = plt.figure(fignum)
@@ -151,6 +153,12 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
 plot_pose2_on_axes(axes, pose, axis_length=axis_length,
 covariance=covariance)

+axes.set_xlabel(axis_labels[0])
+axes.set_ylabel(axis_labels[1])
+axes.set_zlabel(axis_labels[2])
+
+return fig


 def plot_point3_on_axes(axes, point, linespec, P=None):
 """
@@ -167,7 +175,8 @@ def plot_point3_on_axes(axes, point, linespec, P=None):
 plot_covariance_ellipse_3d(axes, point.vector(), P)


-def plot_point3(fignum, point, linespec, P=None):
+def plot_point3(fignum, point, linespec, P=None,
+axis_labels=('X axis', 'Y axis', 'Z axis')):
 """
 Plot a 3D point on given figure with given `linespec`.

@@ -176,13 +185,25 @@ def plot_point3(fignum, point, linespec, P=None):
 point (gtsam.Point3): The point to be plotted.
 linespec (string): String representing formatting options for Matplotlib.
 P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
+axis_labels (iterable[string]): List of axis labels to set.
+
+Returns:
+fig: The matplotlib figure.
+
 """
 fig = plt.figure(fignum)
 axes = fig.gca(projection='3d')
 plot_point3_on_axes(axes, point, linespec, P)

+axes.set_xlabel(axis_labels[0])
+axes.set_ylabel(axis_labels[1])
+axes.set_zlabel(axis_labels[2])
+
-def plot_3d_points(fignum, values, linespec="g*", marginals=None):
+return fig
+
+
+def plot_3d_points(fignum, values, linespec="g*", marginals=None,
+title="3D Points", axis_labels=('X axis', 'Y axis', 'Z axis')):
 """
 Plots the Point3s in `values`, with optional covariances.
 Finds all the Point3 objects in the given Values object and plots them.
@ -193,7 +214,9 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None):
|
||||||
fignum (int): Integer representing the figure number to use for plotting.
|
fignum (int): Integer representing the figure number to use for plotting.
|
||||||
values (gtsam.Values): Values dictionary consisting of points to be plotted.
|
values (gtsam.Values): Values dictionary consisting of points to be plotted.
|
||||||
linespec (string): String representing formatting options for Matplotlib.
|
linespec (string): String representing formatting options for Matplotlib.
|
||||||
covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
|
marginals (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
|
||||||
|
title (string): The title of the plot.
|
||||||
|
axis_labels (iterable[string]): List of axis labels to set.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
keys = values.keys()
|
keys = values.keys()
|
||||||
|
@ -208,12 +231,16 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None):
|
||||||
else:
|
else:
|
||||||
covariance = None
|
covariance = None
|
||||||
|
|
||||||
plot_point3(fignum, point, linespec, covariance)
|
fig = plot_point3(fignum, point, linespec, covariance,
|
||||||
|
axis_labels=axis_labels)
|
||||||
|
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
continue
|
continue
|
||||||
# I guess it's not a Point3
|
# I guess it's not a Point3
|
||||||
|
|
||||||
|
fig.suptitle(title)
|
||||||
|
fig.canvas.set_window_title(title.lower())
|
||||||
|
|
||||||
|
|
||||||
def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
|
def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
|
||||||
"""
|
"""
|
||||||
|
@ -251,7 +278,8 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
|
||||||
plot_covariance_ellipse_3d(axes, origin, gPp)
|
plot_covariance_ellipse_3d(axes, origin, gPp)
|
||||||
|
|
||||||
|
|
||||||
def plot_pose3(fignum, pose, axis_length=0.1, P=None):
|
def plot_pose3(fignum, pose, axis_length=0.1, P=None,
|
||||||
|
axis_labels=('X axis', 'Y axis', 'Z axis')):
|
||||||
"""
|
"""
|
||||||
Plot a 3D pose on given figure with given `axis_length`.
|
Plot a 3D pose on given figure with given `axis_length`.
|
||||||
|
|
||||||
|
@ -260,6 +288,10 @@ def plot_pose3(fignum, pose, axis_length=0.1, P=None):
|
||||||
pose (gtsam.Pose3): 3D pose to be plotted.
|
pose (gtsam.Pose3): 3D pose to be plotted.
|
||||||
linespec (string): String representing formatting options for Matplotlib.
|
linespec (string): String representing formatting options for Matplotlib.
|
||||||
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
|
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
|
||||||
|
axis_labels (iterable[string]): List of axis labels to set.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
fig: The matplotlib figure.
|
||||||
"""
|
"""
|
||||||
# get figure object
|
# get figure object
|
||||||
fig = plt.figure(fignum)
|
fig = plt.figure(fignum)
|
||||||
|
@ -267,9 +299,15 @@ def plot_pose3(fignum, pose, axis_length=0.1, P=None):
|
||||||
plot_pose3_on_axes(axes, pose, P=P,
|
plot_pose3_on_axes(axes, pose, P=P,
|
||||||
axis_length=axis_length)
|
axis_length=axis_length)
|
||||||
|
|
||||||
|
axes.set_xlabel(axis_labels[0])
|
||||||
|
axes.set_ylabel(axis_labels[1])
|
||||||
|
axes.set_zlabel(axis_labels[2])
|
||||||
|
|
||||||
|
return fig
|
||||||
|
|
||||||
|
|
||||||
def plot_trajectory(fignum, values, scale=1, marginals=None,
|
def plot_trajectory(fignum, values, scale=1, marginals=None,
|
||||||
animation_interval=0.0):
|
title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')):
|
||||||
"""
|
"""
|
||||||
Plot a complete 3D trajectory using poses in `values`.
|
Plot a complete 3D trajectory using poses in `values`.
|
||||||
|
|
||||||
|
@ -279,6 +317,8 @@ def plot_trajectory(fignum, values, scale=1, marginals=None,
|
||||||
scale (float): Value to scale the poses by.
|
scale (float): Value to scale the poses by.
|
||||||
marginals (gtsam.Marginals): Marginalized probability values of the estimation.
|
marginals (gtsam.Marginals): Marginalized probability values of the estimation.
|
||||||
Used to plot uncertainty bounds.
|
Used to plot uncertainty bounds.
|
||||||
|
title (string): The title of the plot.
|
||||||
|
axis_labels (iterable[string]): List of axis labels to set.
|
||||||
"""
|
"""
|
||||||
pose3Values = gtsam.utilities_allPose3s(values)
|
pose3Values = gtsam.utilities_allPose3s(values)
|
||||||
keys = gtsam.KeyVector(pose3Values.keys())
|
keys = gtsam.KeyVector(pose3Values.keys())
|
||||||
|
@ -304,8 +344,8 @@ def plot_trajectory(fignum, values, scale=1, marginals=None,
|
||||||
else:
|
else:
|
||||||
covariance = None
|
covariance = None
|
||||||
|
|
||||||
plot_pose3(fignum, lastPose, P=covariance,
|
fig = plot_pose3(fignum, lastPose, P=covariance,
|
||||||
axis_length=scale)
|
axis_length=scale, axis_labels=axis_labels)
|
||||||
|
|
||||||
lastIndex = i
|
lastIndex = i
|
||||||
|
|
||||||
|
@ -319,12 +359,15 @@ def plot_trajectory(fignum, values, scale=1, marginals=None,
|
||||||
else:
|
else:
|
||||||
covariance = None
|
covariance = None
|
||||||
|
|
||||||
plot_pose3(fignum, lastPose, P=covariance,
|
fig = plot_pose3(fignum, lastPose, P=covariance,
|
||||||
axis_length=scale)
|
axis_length=scale, axis_labels=axis_labels)
|
||||||
|
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
fig.suptitle(title)
|
||||||
|
fig.canvas.set_window_title(title.lower())
|
||||||
|
|
||||||
|
|
||||||
def plot_incremental_trajectory(fignum, values, start=0,
|
def plot_incremental_trajectory(fignum, values, start=0,
|
||||||
scale=1, marginals=None,
|
scale=1, marginals=None,
|
||||||
|
|
|
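A short sketch of how the new `axis_labels`/`title` keywords might be used, assuming these helpers are importable as `gtsam.utils.plot` (the module path is an assumption; the signatures are the ones shown in the diff).

```python
import matplotlib.pyplot as plt
import gtsam
# Assumed module path for the plotting helpers shown in the diff above.
import gtsam.utils.plot as gtsam_plot

pose = gtsam.Pose3()  # identity pose at the origin

# The figure is now returned, so a title can be set on it, and the
# axis labels are forwarded to the 3D axes.
fig = gtsam_plot.plot_pose3(0, pose, axis_length=0.5,
                            axis_labels=('x [m]', 'y [m]', 'z [m]'))
fig.suptitle('single pose')
plt.show()
```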
@ -4,11 +4,11 @@ include(GtsamCythonWrap)
|
||||||
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
|
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
|
||||||
# and eigency's cython pxd headers can be found when cythonizing gtsam
|
# and eigency's cython pxd headers can be found when cythonizing gtsam
|
||||||
file(COPY "." DESTINATION ".")
|
file(COPY "." DESTINATION ".")
|
||||||
set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
|
set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
|
||||||
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
|
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
|
||||||
|
|
||||||
# This is to make the build/cython/gtsam_eigency folder a python package
|
# This is to make the build/cython/gtsam_eigency folder a python package
|
||||||
configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
|
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
|
||||||
|
|
||||||
# include eigency headers
|
# include eigency headers
|
||||||
include_directories(${EIGENCY_INCLUDE_DIR})
|
include_directories(${EIGENCY_INCLUDE_DIR})
|
||||||
|
@ -16,8 +16,8 @@ include_directories(${EIGENCY_INCLUDE_DIR})
|
||||||
# Cythonize and build eigency
|
# Cythonize and build eigency
|
||||||
message(STATUS "Cythonize and build eigency")
|
message(STATUS "Cythonize and build eigency")
|
||||||
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
|
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
|
||||||
# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
|
# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions()
|
||||||
# in conversions_api.h correctly!!!
|
# in conversions_api.h correctly!
|
||||||
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
|
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
|
||||||
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||||
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
||||||
|
@ -37,13 +37,6 @@ add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
|
||||||
add_custom_target(cythonize_eigency)
|
add_custom_target(cythonize_eigency)
|
||||||
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
||||||
|
|
||||||
# install
|
if(TARGET ${python_install_target})
|
||||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
add_dependencies(${python_install_target} cythonize_eigency)
|
||||||
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}"
|
endif()
|
||||||
PATTERN "CMakeLists.txt" EXCLUDE
|
|
||||||
PATTERN "__init__.py.in" EXCLUDE)
|
|
||||||
install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
|
|
||||||
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency")
|
|
||||||
install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
|
|
||||||
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
|
|
||||||
install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
|
|
||||||
|
|
|
@ -7,6 +7,22 @@ except ImportError:
|
||||||
|
|
||||||
packages = find_packages()
|
packages = find_packages()
|
||||||
|
|
||||||
|
package_data = {
|
||||||
|
package:
|
||||||
|
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
|
||||||
|
for package in packages
|
||||||
|
}
|
||||||
|
|
||||||
|
cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines()
|
||||||
|
|
||||||
|
install_requires = [line.strip() \
|
||||||
|
for line in cython_install_requirements \
|
||||||
|
if len(line.strip()) > 0 and not line.strip().startswith('#')
|
||||||
|
]
|
||||||
|
|
||||||
|
# Cleaner to read in the contents rather than copy them over.
|
||||||
|
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read()
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='gtsam',
|
name='gtsam',
|
||||||
description='Georgia Tech Smoothing And Mapping library',
|
description='Georgia Tech Smoothing And Mapping library',
|
||||||
|
@ -16,7 +32,7 @@ setup(
|
||||||
author_email='frank.dellaert@gtsam.org',
|
author_email='frank.dellaert@gtsam.org',
|
||||||
license='Simplified BSD license',
|
license='Simplified BSD license',
|
||||||
keywords='slam sam robotics localization mapping optimization',
|
keywords='slam sam robotics localization mapping optimization',
|
||||||
long_description='''${README_CONTENTS}''',
|
long_description=readme_contents,
|
||||||
long_description_content_type='text/markdown',
|
long_description_content_type='text/markdown',
|
||||||
python_requires='>=2.7',
|
python_requires='>=2.7',
|
||||||
# https://pypi.org/pypi?%3Aaction=list_classifiers
|
# https://pypi.org/pypi?%3Aaction=list_classifiers
|
||||||
|
@ -34,11 +50,6 @@ setup(
|
||||||
],
|
],
|
||||||
|
|
||||||
packages=packages,
|
packages=packages,
|
||||||
package_data={package:
|
package_data=package_data,
|
||||||
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
|
install_requires=install_requires
|
||||||
for package in packages
|
|
||||||
},
|
|
||||||
install_requires=[line.strip() for line in '''
|
|
||||||
${CYTHON_INSTALL_REQUIREMENTS}
|
|
||||||
'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
|
|
||||||
)
|
)
|
||||||
|
|
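For clarity, the `package_data` comprehension introduced above just maps each discovered package to the compiled extensions in its directory; a stand-alone illustration with a made-up directory listing (names are hypothetical):

```python
import os

packages = ['gtsam', 'gtsam_eigency']  # what find_packages() might return

def listdir_stub(path):
    # Hypothetical directory contents, standing in for os.listdir(path).
    fake = {
        'gtsam': ['__init__.py', 'gtsam.so'],
        'gtsam_eigency': ['__init__.py', 'core.so', 'conversions.so'],
    }
    return fake[path]

package_data = {
    package: [f for f in listdir_stub(package.replace('.', os.path.sep))
              if os.path.splitext(f)[1] in ('.so', '.pyd')]
    for package in packages
}
print(package_data)
# {'gtsam': ['gtsam.so'], 'gtsam_eigency': ['core.so', 'conversions.so']}
```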
Binary file not shown.
|
@ -0,0 +1,21 @@
|
||||||
|
# Instructions
|
||||||
|
|
||||||
|
Build all docker images, in order:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
(cd ubuntu-boost-tbb && ./build.sh)
|
||||||
|
(cd ubuntu-gtsam && ./build.sh)
|
||||||
|
(cd ubuntu-gtsam-python && ./build.sh)
|
||||||
|
(cd ubuntu-gtsam-python-vnc && ./build.sh)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then launch with:
|
||||||
|
|
||||||
|
docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
|
||||||
|
|
||||||
|
Then open a remote VNC X client, for example:
|
||||||
|
|
||||||
|
sudo apt-get install tigervnc-viewer
|
||||||
|
xtigervncviewer :5900
|
||||||
|
|
||||||
|
|
|
@ -1,18 +0,0 @@
|
||||||
# Get the base Ubuntu image from Docker Hub
|
|
||||||
FROM ubuntu:bionic
|
|
||||||
|
|
||||||
# Update apps on the base image
|
|
||||||
RUN apt-get -y update && apt-get install -y
|
|
||||||
|
|
||||||
# Install C++
|
|
||||||
RUN apt-get -y install build-essential
|
|
||||||
|
|
||||||
# Install boost and cmake
|
|
||||||
RUN apt-get -y install libboost-all-dev cmake
|
|
||||||
|
|
||||||
# Install TBB
|
|
||||||
RUN apt-get -y install libtbb-dev
|
|
||||||
|
|
||||||
# Install latest Eigen
|
|
||||||
RUN apt-get install -y libeigen3-dev
|
|
||||||
|
|
|
@ -0,0 +1,19 @@
|
||||||
|
# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages.
|
||||||
|
|
||||||
|
# Get the base Ubuntu image from Docker Hub
|
||||||
|
FROM ubuntu:bionic
|
||||||
|
|
||||||
|
# Disable GUI prompts
|
||||||
|
ENV DEBIAN_FRONTEND noninteractive
|
||||||
|
|
||||||
|
# Update apps on the base image
|
||||||
|
RUN apt-get -y update && apt-get -y install
|
||||||
|
|
||||||
|
# Install C++
|
||||||
|
RUN apt-get -y install build-essential apt-utils
|
||||||
|
|
||||||
|
# Install boost and cmake
|
||||||
|
RUN apt-get -y install libboost-all-dev cmake
|
||||||
|
|
||||||
|
# Install TBB
|
||||||
|
RUN apt-get -y install libtbb-dev
|
|
@ -0,0 +1,3 @@
|
||||||
|
# Build command for Docker image
|
||||||
|
# TODO(dellaert): use docker compose and/or cmake
|
||||||
|
docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
|
|
@ -0,0 +1,20 @@
|
||||||
|
# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
|
||||||
|
|
||||||
|
# Get the base Ubuntu/GTSAM image from Docker Hub
|
||||||
|
FROM dellaert/ubuntu-gtsam-python:bionic
|
||||||
|
|
||||||
|
# Things needed to get a python GUI
|
||||||
|
ENV DEBIAN_FRONTEND noninteractive
|
||||||
|
RUN apt install -y python-tk
|
||||||
|
RUN python3 -m pip install matplotlib
|
||||||
|
|
||||||
|
# Install a VNC X-server, Frame buffer, and windows manager
|
||||||
|
RUN apt install -y x11vnc xvfb fluxbox
|
||||||
|
|
||||||
|
# Finally, install wmctrl needed for bootstrap script
|
||||||
|
RUN apt install -y wmctrl
|
||||||
|
|
||||||
|
# Copy bootstrap script and make sure it runs
|
||||||
|
COPY bootstrap.sh /
|
||||||
|
|
||||||
|
CMD '/bootstrap.sh'
|
|
@ -0,0 +1,111 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb
|
||||||
|
|
||||||
|
main() {
|
||||||
|
log_i "Starting xvfb virtual display..."
|
||||||
|
launch_xvfb
|
||||||
|
log_i "Starting window manager..."
|
||||||
|
launch_window_manager
|
||||||
|
log_i "Starting VNC server..."
|
||||||
|
run_vnc_server
|
||||||
|
}
|
||||||
|
|
||||||
|
launch_xvfb() {
|
||||||
|
local xvfbLockFilePath="/tmp/.X1-lock"
|
||||||
|
if [ -f "${xvfbLockFilePath}" ]
|
||||||
|
then
|
||||||
|
log_i "Removing xvfb lock file '${xvfbLockFilePath}'..."
|
||||||
|
if ! rm -v "${xvfbLockFilePath}"
|
||||||
|
then
|
||||||
|
log_e "Failed to remove xvfb lock file"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set defaults if the user did not specify envs.
|
||||||
|
export DISPLAY=${XVFB_DISPLAY:-:1}
|
||||||
|
local screen=${XVFB_SCREEN:-0}
|
||||||
|
local resolution=${XVFB_RESOLUTION:-1280x960x24}
|
||||||
|
local timeout=${XVFB_TIMEOUT:-5}
|
||||||
|
|
||||||
|
# Start and wait for either Xvfb to be fully up or we hit the timeout.
|
||||||
|
Xvfb ${DISPLAY} -screen ${screen} ${resolution} &
|
||||||
|
local loopCount=0
|
||||||
|
until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1
|
||||||
|
do
|
||||||
|
loopCount=$((loopCount+1))
|
||||||
|
sleep 1
|
||||||
|
if [ ${loopCount} -gt ${timeout} ]
|
||||||
|
then
|
||||||
|
log_e "xvfb failed to start"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
launch_window_manager() {
|
||||||
|
local timeout=${XVFB_TIMEOUT:-5}
|
||||||
|
|
||||||
|
# Start and wait for either fluxbox to be fully up or we hit the timeout.
|
||||||
|
fluxbox &
|
||||||
|
local loopCount=0
|
||||||
|
until wmctrl -m > /dev/null 2>&1
|
||||||
|
do
|
||||||
|
loopCount=$((loopCount+1))
|
||||||
|
sleep 1
|
||||||
|
if [ ${loopCount} -gt ${timeout} ]
|
||||||
|
then
|
||||||
|
log_e "fluxbox failed to start"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
run_vnc_server() {
|
||||||
|
local passwordArgument='-nopw'
|
||||||
|
|
||||||
|
if [ -n "${VNC_SERVER_PASSWORD}" ]
|
||||||
|
then
|
||||||
|
local passwordFilePath="${HOME}/.x11vnc.pass"
|
||||||
|
if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}"
|
||||||
|
then
|
||||||
|
log_e "Failed to store x11vnc password"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
passwordArgument=-"-rfbauth ${passwordFilePath}"
|
||||||
|
log_i "The VNC server will ask for a password"
|
||||||
|
else
|
||||||
|
log_w "The VNC server will NOT ask for a password"
|
||||||
|
fi
|
||||||
|
|
||||||
|
x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} &
|
||||||
|
wait $!
|
||||||
|
}
|
||||||
|
|
||||||
|
log_i() {
|
||||||
|
log "[INFO] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_w() {
|
||||||
|
log "[WARN] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_e() {
|
||||||
|
log "[ERROR] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log() {
|
||||||
|
echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
control_c() {
|
||||||
|
echo ""
|
||||||
|
exit
|
||||||
|
}
|
||||||
|
|
||||||
|
trap control_c SIGINT SIGTERM SIGHUP
|
||||||
|
|
||||||
|
main
|
||||||
|
|
||||||
|
exit
|
|
@ -0,0 +1,4 @@
|
||||||
|
# Build command for Docker image
|
||||||
|
# TODO(dellaert): use docker compose and/or cmake
|
||||||
|
# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
|
||||||
|
docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
|
|
@ -0,0 +1,5 @@
|
||||||
|
# After running this script, connect VNC client to 0.0.0.0:5900
|
||||||
|
docker run -it \
|
||||||
|
--workdir="/usr/src/gtsam" \
|
||||||
|
-p 5900:5900 \
|
||||||
|
dellaert/ubuntu-gtsam-python-vnc:bionic
|
|
@ -0,0 +1,31 @@
|
||||||
|
# GTSAM Ubuntu image with Python wrapper support.
|
||||||
|
|
||||||
|
# Get the base Ubuntu/GTSAM image from Docker Hub
|
||||||
|
FROM dellaert/ubuntu-gtsam:bionic
|
||||||
|
|
||||||
|
# Install pip
|
||||||
|
RUN apt-get install -y python3-pip python3-dev
|
||||||
|
|
||||||
|
# Install python wrapper requirements
|
||||||
|
RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt
|
||||||
|
|
||||||
|
# Run cmake again, now with cython toolbox on
|
||||||
|
WORKDIR /usr/src/gtsam/build
|
||||||
|
RUN cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGTSAM_WITH_EIGEN_MKL=OFF \
|
||||||
|
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TESTS=OFF \
|
||||||
|
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
|
||||||
|
-DGTSAM_PYTHON_VERSION=3 \
|
||||||
|
..
|
||||||
|
|
||||||
|
# Build again, as the ubuntu-gtsam image was cleaned
|
||||||
|
RUN make -j4 install && make clean
|
||||||
|
|
||||||
|
# Needed to run python wrapper:
|
||||||
|
RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc
|
||||||
|
|
||||||
|
# Run bash
|
||||||
|
CMD ["bash"]
|
|
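Once `PYTHONPATH` is exported as above, a quick smoke test inside the container could look like the following (an assumed check, not part of the image):

```python
# Run inside the ubuntu-gtsam-python container; assumes the Cython wrapper
# was installed under /usr/local/cython as configured in this Dockerfile.
import gtsam

print(gtsam.__file__)                              # should resolve under /usr/local/cython/
print(gtsam.Pose3().equals(gtsam.Pose3(), 1e-9))   # True if the wrapper is functional
```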
@ -0,0 +1,3 @@
|
||||||
|
# Build command for Docker image
|
||||||
|
# TODO(dellaert): use docker compose and/or cmake
|
||||||
|
docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
|
|
@ -0,0 +1,36 @@
|
||||||
|
# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
|
||||||
|
|
||||||
|
# Get the base Ubuntu image from Docker Hub
|
||||||
|
FROM dellaert/ubuntu-boost-tbb:bionic
|
||||||
|
|
||||||
|
# Install git
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y git
|
||||||
|
|
||||||
|
# Install compiler
|
||||||
|
RUN apt-get install -y build-essential
|
||||||
|
|
||||||
|
# Clone GTSAM (develop branch)
|
||||||
|
WORKDIR /usr/src/
|
||||||
|
RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git
|
||||||
|
|
||||||
|
# Change to build directory. Will be created automatically.
|
||||||
|
WORKDIR /usr/src/gtsam/build
|
||||||
|
# Run cmake
|
||||||
|
RUN cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGTSAM_WITH_EIGEN_MKL=OFF \
|
||||||
|
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TESTS=OFF \
|
||||||
|
-DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \
|
||||||
|
..
|
||||||
|
|
||||||
|
# Build
|
||||||
|
RUN make -j4 install && make clean
|
||||||
|
|
||||||
|
# Needed to link with GTSAM
|
||||||
|
RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> /root/.bashrc
|
||||||
|
|
||||||
|
# Run bash
|
||||||
|
CMD ["bash"]
|
|
@ -0,0 +1,3 @@
|
||||||
|
# Build command for Docker image
|
||||||
|
# TODO(dellaert): use docker compose and/or cmake
|
||||||
|
docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
|
|
@ -1,7 +1,4 @@
|
||||||
set (excluded_examples
|
set (excluded_examples
|
||||||
DiscreteBayesNet_FG.cpp
|
|
||||||
UGM_chain.cpp
|
|
||||||
UGM_small.cpp
|
|
||||||
elaboratePoint2KalmanFilter.cpp
|
elaboratePoint2KalmanFilter.cpp
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -46,8 +46,8 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
/// evaluate the error
|
/// evaluate the error
|
||||||
virtual Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
|
Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
|
||||||
boost::none) const {
|
boost::none) const override {
|
||||||
PinholeCamera<Cal3_S2> camera(pose, *K_);
|
PinholeCamera<Cal3_S2> camera(pose, *K_);
|
||||||
return camera.project(P_, H, boost::none, boost::none) - p_;
|
return camera.project(P_, H, boost::none, boost::none) - p_;
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,6 @@
|
||||||
|
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
|
||||||
|
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
|
||||||
|
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
|
||||||
|
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||||
|
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||||
|
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
|
@ -0,0 +1,16 @@
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352320 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352321 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352322 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352323 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352324 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352325 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352326 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
|
||||||
|
VERTEX_SE3:QUAT 8646911284551352327 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217088 10 10 10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217089 -10 10 10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217090 -10 -10 10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217091 10 -10 10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217092 10 10 -10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217093 -10 10 -10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217094 -10 -10 -10
|
||||||
|
VERTEX_TRACKXYZ 7782220156096217095 10 -10 -10
|
|
@ -0,0 +1,11 @@
|
||||||
|
VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
|
||||||
|
VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1
|
||||||
|
VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||||
|
VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963
|
||||||
|
VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||||
|
EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
|
@ -0,0 +1,83 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file DiscreteBayesNetExample.cpp
|
||||||
|
* @brief Discrete Bayes Net example with famous Asia Bayes Network
|
||||||
|
* @author Frank Dellaert
|
||||||
|
* @date JULY 10, 2020
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
#include <gtsam/inference/BayesNet.h>
|
||||||
|
|
||||||
|
#include <iomanip>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
int main(int argc, char **argv) {
|
||||||
|
DiscreteBayesNet asia;
|
||||||
|
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
|
||||||
|
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||||
|
asia.add(Asia % "99/1");
|
||||||
|
asia.add(Smoking % "50/50");
|
||||||
|
|
||||||
|
asia.add(Tuberculosis | Asia = "99/1 95/5");
|
||||||
|
asia.add(LungCancer | Smoking = "99/1 90/10");
|
||||||
|
asia.add(Bronchitis | Smoking = "70/30 40/60");
|
||||||
|
|
||||||
|
asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||||
|
|
||||||
|
asia.add(XRay | Either = "95/5 2/98");
|
||||||
|
asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
|
||||||
|
|
||||||
|
// print
|
||||||
|
vector<string> pretty = {"Asia", "Dyspnea", "XRay", "Tuberculosis",
|
||||||
|
"Smoking", "Either", "LungCancer", "Bronchitis"};
|
||||||
|
auto formatter = [pretty](Key key) { return pretty[key]; };
|
||||||
|
asia.print("Asia", formatter);
|
||||||
|
|
||||||
|
// Convert to factor graph
|
||||||
|
DiscreteFactorGraph fg(asia);
|
||||||
|
|
||||||
|
// Create solver and eliminate
|
||||||
|
Ordering ordering;
|
||||||
|
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
|
||||||
|
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
|
||||||
|
|
||||||
|
// solve
|
||||||
|
DiscreteFactor::sharedValues mpe = chordal->optimize();
|
||||||
|
GTSAM_PRINT(*mpe);
|
||||||
|
|
||||||
|
// We can also build a Bayes tree (directed junction tree).
|
||||||
|
// The elimination order above will do fine:
|
||||||
|
auto bayesTree = fg.eliminateMultifrontal(ordering);
|
||||||
|
bayesTree->print("bayesTree", formatter);
|
||||||
|
|
||||||
|
// add evidence, we were in Asia and we have dyspnea
|
||||||
|
fg.add(Asia, "0 1");
|
||||||
|
fg.add(Dyspnea, "0 1");
|
||||||
|
|
||||||
|
// solve again, now with evidence
|
||||||
|
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
||||||
|
DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
|
||||||
|
GTSAM_PRINT(*mpe2);
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t i = 0; i < 10; i++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal2->sample();
|
||||||
|
GTSAM_PRINT(*sample);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
|
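As a sanity check on what `optimize()` computes for this Bayes net, here is an illustrative plain-Python enumeration of the same Asia model (probabilities transcribed from the CPT strings above; the row ordering for multi-parent conditionals is an assumption). It is not gtsam code.

```python
from itertools import product

P_A = [0.99, 0.01]                                  # Asia % "99/1"
P_S = [0.5, 0.5]                                    # Smoking % "50/50"
P_T_given_A = [[0.99, 0.01], [0.95, 0.05]]          # "99/1 95/5"
P_L_given_S = [[0.99, 0.01], [0.90, 0.10]]          # "99/1 90/10"
P_B_given_S = [[0.70, 0.30], [0.40, 0.60]]          # "70/30 40/60"
P_X_given_E = [[0.95, 0.05], [0.02, 0.98]]          # "95/5 2/98"
P_D_given_EB = {(0, 0): [0.9, 0.1], (0, 1): [0.2, 0.8],     # "9/1 2/8 3/7 1/9"
                (1, 0): [0.3, 0.7], (1, 1): [0.1, 0.9]}     # (row order assumed)

def joint(a, s, t, l, b, e, x, d):
    either = 1 if (t or l) else 0                   # "F T T T": E is the OR of T and L
    if e != either:
        return 0.0
    return (P_A[a] * P_S[s] * P_T_given_A[a][t] * P_L_given_S[s][l] *
            P_B_given_S[s][b] * P_X_given_E[e][x] * P_D_given_EB[(e, b)][d])

# Most probable explanation, without and with the evidence Asia=1, Dyspnea=1.
assignments = list(product([0, 1], repeat=8))       # order: a, s, t, l, b, e, x, d
print("MPE:", max(assignments, key=lambda v: joint(*v)))
print("MPE | Asia=1, Dyspnea=1:",
      max((v for v in assignments if v[0] == 1 and v[7] == 1),
          key=lambda v: joint(*v)))
```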
@ -15,105 +15,106 @@
|
||||||
* @author Abhijit
|
* @author Abhijit
|
||||||
* @date Jun 4, 2012
|
* @date Jun 4, 2012
|
||||||
*
|
*
|
||||||
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529]
|
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009,
|
||||||
* You may be familiar with other graphical model packages like BNT (available
|
* p529] You may be familiar with other graphical model packages like BNT
|
||||||
* at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
|
* (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
|
||||||
* example. The following demo is same as that in the above link, except that
|
* is used as an example. The following demo is same as that in the above link,
|
||||||
* everything is using GTSAM.
|
* except that everything is using GTSAM.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/discrete/DiscreteSequentialSolver.h>
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
|
||||||
int main(int argc, char **argv) {
|
int main(int argc, char **argv) {
|
||||||
|
// Define keys and a print function
|
||||||
|
Key C(1), S(2), R(3), W(4);
|
||||||
|
auto print = [=](DiscreteFactor::sharedValues values) {
|
||||||
|
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
|
||||||
|
<< " Sprinkler = " << static_cast<bool>((*values)[S])
|
||||||
|
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
|
||||||
|
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
|
||||||
|
};
|
||||||
|
|
||||||
// We assume binary state variables
|
// We assume binary state variables
|
||||||
// we have 0 == "False" and 1 == "True"
|
// we have 0 == "False" and 1 == "True"
|
||||||
const size_t nrStates = 2;
|
const size_t nrStates = 2;
|
||||||
|
|
||||||
// define variables
|
// define variables
|
||||||
DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates),
|
DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates),
|
||||||
WetGrass(4, nrStates);
|
WetGrass(W, nrStates);
|
||||||
|
|
||||||
// create Factor Graph of the bayes net
|
// create Factor Graph of the bayes net
|
||||||
DiscreteFactorGraph graph;
|
DiscreteFactorGraph graph;
|
||||||
|
|
||||||
// add factors
|
// add factors
|
||||||
graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
|
graph.add(Cloudy, "0.5 0.5"); // P(Cloudy)
|
||||||
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
|
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy)
|
||||||
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
|
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy)
|
||||||
graph.add(Sprinkler & Rain & WetGrass,
|
graph.add(Sprinkler & Rain & WetGrass,
|
||||||
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
|
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain)
|
||||||
|
|
||||||
// Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
|
// Alternatively we can also create a DiscreteBayesNet, add
|
||||||
// factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
|
// DiscreteConditional factors and create a FactorGraph from it. (See
|
||||||
|
// testDiscreteBayesNet.cpp)
|
||||||
|
|
||||||
// Since this is a relatively small distribution, we can as well print
|
// Since this is a relatively small distribution, we can as well print
|
||||||
// the whole distribution..
|
// the whole distribution..
|
||||||
cout << "Distribution of Example: " << endl;
|
cout << "Distribution of Example: " << endl;
|
||||||
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
|
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
|
||||||
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
||||||
<< endl;
|
<< endl;
|
||||||
for (size_t a = 0; a < nrStates; a++)
|
for (size_t a = 0; a < nrStates; a++)
|
||||||
for (size_t m = 0; m < nrStates; m++)
|
for (size_t m = 0; m < nrStates; m++)
|
||||||
for (size_t h = 0; h < nrStates; h++)
|
for (size_t h = 0; h < nrStates; h++)
|
||||||
for (size_t c = 0; c < nrStates; c++) {
|
for (size_t c = 0; c < nrStates; c++) {
|
||||||
DiscreteFactor::Values values;
|
DiscreteFactor::Values values;
|
||||||
values[Cloudy.first] = c;
|
values[C] = c;
|
||||||
values[Sprinkler.first] = h;
|
values[S] = h;
|
||||||
values[Rain.first] = m;
|
values[R] = m;
|
||||||
values[WetGrass.first] = a;
|
values[W] = a;
|
||||||
double prodPot = graph(values);
|
double prodPot = graph(values);
|
||||||
cout << boolalpha << setw(8) << (bool) c << setw(14)
|
cout << setw(8) << static_cast<bool>(c) << setw(14)
|
||||||
<< (bool) h << setw(12) << (bool) m << setw(13)
|
<< static_cast<bool>(h) << setw(12) << static_cast<bool>(m)
|
||||||
<< (bool) a << setw(16) << prodPot << endl;
|
<< setw(13) << static_cast<bool>(a) << setw(16) << prodPot
|
||||||
|
<< endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// "Most Probable Explanation", i.e., configuration with largest value
|
// "Most Probable Explanation", i.e., configuration with largest value
|
||||||
DiscreteSequentialSolver solver(graph);
|
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
|
||||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
cout << "\nMost Probable Explanation (MPE):" << endl;
|
||||||
cout <<"\nMost Probable Explanation (MPE):" << endl;
|
print(mpe);
|
||||||
cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first]
|
|
||||||
<< " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first]
|
|
||||||
<< " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first]
|
|
||||||
<< " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl;
|
|
||||||
|
|
||||||
|
// "Inference" We show an inference query like: probability that the Sprinkler
|
||||||
|
// was on, given that it is not cloudy, i.e. P(S | C=0) = ?
|
||||||
|
|
||||||
// "Inference" We show an inference query like: probability that the Sprinkler was on;
|
// add evidence that it is not Cloudy
|
||||||
// given that the grass is wet i.e. P( S | W=1) =?
|
graph.add(Cloudy, "1 0");
|
||||||
cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl;
|
|
||||||
|
|
||||||
// Method 1: we can compute the joint marginal P(S,W) and from that we can compute
|
// solve again, now with evidence
|
||||||
// P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
|
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||||
|
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
|
||||||
|
|
||||||
//Step1: Compute P(S,W)
|
cout << "\nMPE given C=0:" << endl;
|
||||||
DiscreteFactorGraph jointFG;
|
print(mpe_with_evidence);
|
||||||
jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices());
|
|
||||||
DecisionTreeFactor probSW = jointFG.product();
|
|
||||||
|
|
||||||
//Step2: Compute P(W)
|
// we can also calculate arbitrary marginals:
|
||||||
DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first);
|
DiscreteMarginals marginals(graph);
|
||||||
|
cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
|
||||||
//Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
|
<< endl;
|
||||||
DiscreteFactor::Values values;
|
cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
|
||||||
values[WetGrass.first] = 1;
|
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
|
||||||
|
<< endl;
|
||||||
//print P(S=0|W=1)
|
|
||||||
values[Sprinkler.first] = 0;
|
|
||||||
cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
|
||||||
|
|
||||||
//print P(S=1|W=1)
|
|
||||||
values[Sprinkler.first] = 1;
|
|
||||||
cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
|
||||||
|
|
||||||
// TODO: Method 2 : One way is to modify the factor graph to
|
|
||||||
// incorporate the evidence node and compute the marginal
|
|
||||||
// TODO: graph.addEvidence(Cloudy,0);
|
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t i = 0; i < 10; i++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal->sample();
|
||||||
|
print(sample);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
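To make the marginal query concrete, an illustrative plain-Python enumeration of the same sprinkler model computing P(S=1 | C=0); values are transcribed from the factor strings above (row order for the WetGrass table is an assumption), and this is not gtsam code.

```python
from itertools import product

P_C = [0.5, 0.5]
P_S_given_C = [[0.5, 0.5], [0.9, 0.1]]                      # "0.5 0.5 0.9 0.1"
P_R_given_C = [[0.8, 0.2], [0.2, 0.8]]                      # "0.8 0.2 0.2 0.8"
P_W_given_SR = {(0, 0): [1.0, 0.0], (0, 1): [0.1, 0.9],     # "1 0 0.1 0.9 ..."
                (1, 0): [0.1, 0.9], (1, 1): [0.001, 0.99]}  # values as given

def joint(c, s, r, w):
    return P_C[c] * P_S_given_C[c][s] * P_R_given_C[c][r] * P_W_given_SR[(s, r)][w]

# P(S=1 | C=0) = sum_{r,w} P(C=0, S=1, r, w) / sum_{s,r,w} P(C=0, s, r, w)
num = sum(joint(0, 1, r, w) for r, w in product([0, 1], repeat=2))
den = sum(joint(0, s, r, w) for s, r, w in product([0, 1], repeat=3))
print("P(S=1 | C=0) =", num / den)   # ~0.5, since Sprinkler depends only on Cloudy
```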
@ -0,0 +1,94 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file DiscreteBayesNetExample.cpp
|
||||||
|
* @brief Hidden Markov Model example, discrete.
|
||||||
|
* @author Frank Dellaert
|
||||||
|
* @date July 12, 2020
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
#include <gtsam/inference/BayesNet.h>
|
||||||
|
|
||||||
|
#include <iomanip>
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
int main(int argc, char **argv) {
|
||||||
|
const int nrNodes = 4;
|
||||||
|
const size_t nrStates = 3;
|
||||||
|
|
||||||
|
// Define variables as well as ordering
|
||||||
|
Ordering ordering;
|
||||||
|
vector<DiscreteKey> keys;
|
||||||
|
for (int k = 0; k < nrNodes; k++) {
|
||||||
|
DiscreteKey key_i(k, nrStates);
|
||||||
|
keys.push_back(key_i);
|
||||||
|
ordering.emplace_back(k);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create HMM as a DiscreteBayesNet
|
||||||
|
DiscreteBayesNet hmm;
|
||||||
|
|
||||||
|
// Define backbone
|
||||||
|
const string transition = "8/1/1 1/8/1 1/1/8";
|
||||||
|
for (int k = 1; k < nrNodes; k++) {
|
||||||
|
hmm.add(keys[k] | keys[k - 1] = transition);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add some measurements, not needed for all time steps!
|
||||||
|
hmm.add(keys[0] % "7/2/1");
|
||||||
|
hmm.add(keys[1] % "1/9/0");
|
||||||
|
hmm.add(keys.back() % "5/4/1");
|
||||||
|
|
||||||
|
// print
|
||||||
|
hmm.print("HMM");
|
||||||
|
|
||||||
|
// Convert to factor graph
|
||||||
|
DiscreteFactorGraph factorGraph(hmm);
|
||||||
|
|
||||||
|
// Create solver and eliminate
|
||||||
|
// This will create a DAG ordered with arrow of time reversed
|
||||||
|
DiscreteBayesNet::shared_ptr chordal =
|
||||||
|
factorGraph.eliminateSequential(ordering);
|
||||||
|
chordal->print("Eliminated");
|
||||||
|
|
||||||
|
// solve
|
||||||
|
DiscreteFactor::sharedValues mpe = chordal->optimize();
|
||||||
|
GTSAM_PRINT(*mpe);
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t k = 0; k < 10; k++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal->sample();
|
||||||
|
GTSAM_PRINT(*sample);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or compute the marginals. This re-eliminates the FG into a Bayes tree
|
||||||
|
cout << "\nComputing Node Marginals .." << endl;
|
||||||
|
DiscreteMarginals marginals(factorGraph);
|
||||||
|
for (int k = 0; k < nrNodes; k++) {
|
||||||
|
Vector margProbs = marginals.marginalProbabilities(keys[k]);
|
||||||
|
stringstream ss;
|
||||||
|
ss << "marginal " << k;
|
||||||
|
print(margProbs, ss.str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
|
||||||
|
// joints efficiently, by the Bayes tree shortcut magic. All the code is there
|
||||||
|
// but it's not yet connected.
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
|
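An illustrative cross-check of the node marginals for this small HMM by brute-force enumeration in plain Python/NumPy (the "/"-separated strings are assumed to normalize row-wise); it is not gtsam code.

```python
from itertools import product
import numpy as np

# 3-state, 4-node chain from the example above.
T = np.array([[8, 1, 1], [1, 8, 1], [1, 1, 8]], dtype=float)  # "8/1/1 1/8/1 1/1/8"
T /= T.sum(axis=1, keepdims=True)
prior0 = np.array([7, 2, 1], dtype=float) / 10.0              # keys[0] % "7/2/1"
meas1 = np.array([1, 9, 0], dtype=float) / 10.0               # keys[1] % "1/9/0"
meas3 = np.array([5, 4, 1], dtype=float) / 10.0               # keys.back() % "5/4/1"

def weight(x0, x1, x2, x3):
    # Unnormalized probability of one assignment of the chain.
    return (prior0[x0] * T[x0, x1] * T[x1, x2] * T[x2, x3]
            * meas1[x1] * meas3[x3])

states = list(product(range(3), repeat=4))
Z = sum(weight(*x) for x in states)
for k in range(4):
    marg = [sum(weight(*x) for x in states if x[k] == s) / Z for s in range(3)]
    print("marginal", k, np.round(marg, 3))
```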
@ -0,0 +1,359 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file IMUKittiExampleGPS
|
||||||
|
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||||
|
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
|
||||||
|
*/
|
||||||
|
|
||||||
|
// GTSAM related includes.
|
||||||
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
|
#include <gtsam/navigation/GPSFactor.h>
|
||||||
|
#include <gtsam/navigation/ImuFactor.h>
|
||||||
|
#include <gtsam/slam/dataset.h>
|
||||||
|
#include <gtsam/slam/BetweenFactor.h>
|
||||||
|
#include <gtsam/slam/PriorFactor.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2Params.h>
|
||||||
|
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||||
|
#include <gtsam/inference/Symbol.h>
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <fstream>
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||||
|
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||||
|
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||||
|
|
||||||
|
struct KittiCalibration {
|
||||||
|
double body_ptx;
|
||||||
|
double body_pty;
|
||||||
|
double body_ptz;
|
||||||
|
double body_prx;
|
||||||
|
double body_pry;
|
||||||
|
double body_prz;
|
||||||
|
double accelerometer_sigma;
|
||||||
|
double gyroscope_sigma;
|
||||||
|
double integration_sigma;
|
||||||
|
double accelerometer_bias_sigma;
|
||||||
|
double gyroscope_bias_sigma;
|
||||||
|
double average_delta_t;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ImuMeasurement {
|
||||||
|
double time;
|
||||||
|
double dt;
|
||||||
|
Vector3 accelerometer;
|
||||||
|
Vector3 gyroscope; // omega
|
||||||
|
};
|
||||||
|
|
||||||
|
struct GpsMeasurement {
|
||||||
|
double time;
|
||||||
|
Vector3 position; // x,y,z
|
||||||
|
};
|
||||||
|
|
||||||
|
const string output_filename = "IMUKittiExampleGPSResults.csv";
|
||||||
|
|
||||||
|
void loadKittiData(KittiCalibration& kitti_calibration,
|
||||||
|
vector<ImuMeasurement>& imu_measurements,
|
||||||
|
vector<GpsMeasurement>& gps_measurements) {
|
||||||
|
string line;
|
||||||
|
|
||||||
|
// Read IMU metadata and compute relative sensor pose transforms
|
||||||
|
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
|
||||||
|
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
|
||||||
|
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||||
|
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||||
|
|
||||||
|
printf("-- Reading sensor metadata\n");
|
||||||
|
|
||||||
|
getline(imu_metadata, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
// Load Kitti calibration
|
||||||
|
getline(imu_metadata, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||||
|
&kitti_calibration.body_ptx,
|
||||||
|
&kitti_calibration.body_pty,
|
||||||
|
&kitti_calibration.body_ptz,
|
||||||
|
&kitti_calibration.body_prx,
|
||||||
|
&kitti_calibration.body_pry,
|
||||||
|
&kitti_calibration.body_prz,
|
||||||
|
&kitti_calibration.accelerometer_sigma,
|
||||||
|
&kitti_calibration.gyroscope_sigma,
|
||||||
|
&kitti_calibration.integration_sigma,
|
||||||
|
&kitti_calibration.accelerometer_bias_sigma,
|
||||||
|
&kitti_calibration.gyroscope_bias_sigma,
|
||||||
|
&kitti_calibration.average_delta_t);
|
||||||
|
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||||
|
kitti_calibration.body_ptx,
|
||||||
|
kitti_calibration.body_pty,
|
||||||
|
kitti_calibration.body_ptz,
|
||||||
|
kitti_calibration.body_prx,
|
||||||
|
kitti_calibration.body_pry,
|
||||||
|
kitti_calibration.body_prz,
|
||||||
|
kitti_calibration.accelerometer_sigma,
|
||||||
|
kitti_calibration.gyroscope_sigma,
|
||||||
|
kitti_calibration.integration_sigma,
|
||||||
|
kitti_calibration.accelerometer_bias_sigma,
|
||||||
|
kitti_calibration.gyroscope_bias_sigma,
|
||||||
|
kitti_calibration.average_delta_t);
|
||||||
|
|
||||||
|
// Read IMU data
|
||||||
|
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||||
|
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
|
||||||
|
printf("-- Reading IMU measurements from file\n");
|
||||||
|
{
|
||||||
|
ifstream imu_data(imu_data_file.c_str());
|
||||||
|
getline(imu_data, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
|
||||||
|
while (!imu_data.eof()) {
|
||||||
|
getline(imu_data, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
|
||||||
|
&time, &dt,
|
||||||
|
&acc_x, &acc_y, &acc_z,
|
||||||
|
&gyro_x, &gyro_y, &gyro_z);
|
||||||
|
|
||||||
|
ImuMeasurement measurement;
|
||||||
|
measurement.time = time;
|
||||||
|
measurement.dt = dt;
|
||||||
|
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
|
||||||
|
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
|
||||||
|
imu_measurements.push_back(measurement);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read GPS data
|
||||||
|
// Time,X,Y,Z
|
||||||
|
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
|
||||||
|
printf("-- Reading GPS measurements from file\n");
|
||||||
|
{
|
||||||
|
ifstream gps_data(gps_data_file.c_str());
|
||||||
|
getline(gps_data, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
|
||||||
|
while (!gps_data.eof()) {
|
||||||
|
getline(gps_data, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
|
||||||
|
|
||||||
|
GpsMeasurement measurement;
|
||||||
|
measurement.time = time;
|
||||||
|
measurement.position = Vector3(gps_x, gps_y, gps_z);
|
||||||
|
gps_measurements.push_back(measurement);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
KittiCalibration kitti_calibration;
|
||||||
|
vector<ImuMeasurement> imu_measurements;
|
||||||
|
vector<GpsMeasurement> gps_measurements;
|
||||||
|
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||||
|
|
||||||
|
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||||
|
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||||
|
.finished();
|
||||||
|
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||||
|
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||||
|
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Configure different variables
|
||||||
|
// double t_offset = gps_measurements[0].time;
|
||||||
|
size_t first_gps_pose = 1;
|
||||||
|
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||||
|
double g = 9.8;
|
||||||
|
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||||
|
|
||||||
|
// Configure noise models
|
||||||
|
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
|
Vector3::Constant(1.0/0.07))
|
||||||
|
.finished());
|
||||||
|
|
||||||
|
// Set initial conditions for the estimated trajectory
|
||||||
|
// initial pose is the reference frame (navigation frame)
|
||||||
|
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||||
|
// the vehicle is stationary at the beginning at position 0,0,0
|
||||||
|
Vector3 current_velocity_global = Vector3::Zero();
|
||||||
|
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||||
|
|
||||||
|
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
|
Vector3::Constant(1.0))
|
||||||
|
.finished());
|
||||||
|
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||||
|
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
|
||||||
|
Vector3::Constant(5.00e-05))
|
||||||
|
.finished());
|
||||||
|
|
||||||
|
// Set IMU preintegration parameters
|
||||||
|
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||||
|
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||||
|
// error committed in integrating position from velocities
|
||||||
|
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||||
|
|
||||||
|
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||||
|
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||||
|
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||||
|
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||||
|
imu_params->omegaCoriolis = w_coriolis;
|
||||||
|
|
||||||
|
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
|
||||||
|
|
||||||
|
// Set ISAM2 parameters and create ISAM2 solver object
|
||||||
|
ISAM2Params isam_params;
|
||||||
|
isam_params.factorization = ISAM2Params::CHOLESKY;
|
||||||
|
isam_params.relinearizeSkip = 10;
|
||||||
|
|
||||||
|
ISAM2 isam(isam_params);
|
||||||
|
|
||||||
|
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
|
||||||
|
NonlinearFactorGraph new_factors;
|
||||||
|
Values new_values; // values storing the initial estimates of new nodes in the factor graph
|
||||||
|
|
||||||
|
/// Main loop:
|
||||||
|
/// (1) we read the measurements
|
||||||
|
/// (2) we create the corresponding factors in the graph
|
||||||
|
/// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
|
||||||
|
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
|
||||||
|
size_t j = 0;
|
||||||
|
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||||
|
// At each non-IMU measurement we initialize a new node in the graph
|
||||||
|
auto current_pose_key = X(i);
|
||||||
|
auto current_vel_key = V(i);
|
||||||
|
auto current_bias_key = B(i);
|
||||||
|
double t = gps_measurements[i].time;
|
||||||
|
|
||||||
|
if (i == first_gps_pose) {
|
||||||
|
// Create initial estimate and prior on initial pose, velocity, and biases
|
||||||
|
new_values.insert(current_pose_key, current_pose_global);
|
||||||
|
new_values.insert(current_vel_key, current_velocity_global);
|
||||||
|
new_values.insert(current_bias_key, current_bias);
|
||||||
|
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
|
||||||
|
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
|
||||||
|
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
|
||||||
|
} else {
|
||||||
|
double t_previous = gps_measurements[i-1].time;
|
||||||
|
|
||||||
|
// Summarize IMU data between the previous GPS measurement and now
|
||||||
|
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
|
||||||
|
static size_t included_imu_measurement_count = 0;
|
||||||
|
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||||
|
if (imu_measurements[j].time >= t_previous) {
|
||||||
|
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
|
||||||
|
imu_measurements[j].gyroscope,
|
||||||
|
imu_measurements[j].dt);
|
||||||
|
included_imu_measurement_count++;
|
||||||
|
}
|
||||||
|
j++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create IMU factor
|
||||||
|
auto previous_pose_key = X(i-1);
|
||||||
|
auto previous_vel_key = V(i-1);
|
||||||
|
auto previous_bias_key = B(i-1);
|
||||||
|
|
||||||
|
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
|
||||||
|
current_pose_key, current_vel_key,
|
||||||
|
previous_bias_key, *current_summarized_measurement);
|
||||||
|
|
||||||
|
// Bias evolution as given in the IMU metadata
|
||||||
|
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
|
||||||
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||||
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||||
|
.finished());
|
||||||
|
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
|
||||||
|
current_bias_key,
|
||||||
|
imuBias::ConstantBias(),
|
||||||
|
sigma_between_b);
|
||||||
|
|
||||||
|
// Create GPS factor
|
||||||
|
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||||
|
if ((i % gps_skip) == 0) {
|
||||||
|
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
|
||||||
|
new_values.insert(current_pose_key, gps_pose);
|
||||||
|
|
||||||
|
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
|
||||||
|
gps_pose.translation().print();
|
||||||
|
printf("\n\n");
|
||||||
|
} else {
|
||||||
|
new_values.insert(current_pose_key, current_pose_global);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add initial values for velocity and bias based on the previous estimates
|
||||||
|
new_values.insert(current_vel_key, current_velocity_global);
|
||||||
|
new_values.insert(current_bias_key, current_bias);
|
||||||
|
|
||||||
|
// Update solver
|
||||||
|
// =======================================================================
|
||||||
|
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||||
|
// first so that the heading becomes observable.
|
||||||
|
if (i > (first_gps_pose + 2*gps_skip)) {
|
||||||
|
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
|
||||||
|
new_factors.print();
|
||||||
|
|
||||||
|
isam.update(new_factors, new_values);
|
||||||
|
|
||||||
|
// Reset the newFactors and newValues list
|
||||||
|
new_factors.resize(0);
|
||||||
|
new_values.clear();
|
||||||
|
|
||||||
|
// Extract the result/current estimates
|
||||||
|
Values result = isam.calculateEstimate();
|
||||||
|
|
||||||
|
current_pose_global = result.at<Pose3>(current_pose_key);
|
||||||
|
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||||
|
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||||
|
|
||||||
|
printf("\n################ POSE AT TIME %lf ################\n", t);
|
||||||
|
current_pose_global.print();
|
||||||
|
printf("\n\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save results to file
|
||||||
|
printf("\nWriting results to file...\n");
|
||||||
|
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||||
|
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||||
|
|
||||||
|
Values result = isam.calculateEstimate();
|
||||||
|
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||||
|
auto pose_key = X(i);
|
||||||
|
auto vel_key = V(i);
|
||||||
|
auto bias_key = B(i);
|
||||||
|
|
||||||
|
auto pose = result.at<Pose3>(pose_key);
|
||||||
|
auto velocity = result.at<Vector3>(vel_key);
|
||||||
|
auto bias = result.at<imuBias::ConstantBias>(bias_key);
|
||||||
|
|
||||||
|
auto pose_quat = pose.rotation().toQuaternion();
|
||||||
|
auto gps = gps_measurements[i].position;
|
||||||
|
|
||||||
|
cout << "State at #" << i << endl;
|
||||||
|
cout << "Pose:" << endl << pose << endl;
|
||||||
|
cout << "Velocity:" << endl << velocity << endl;
|
||||||
|
cout << "Bias:" << endl << bias << endl;
|
||||||
|
|
||||||
|
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||||
|
gps_measurements[i].time,
|
||||||
|
pose.x(), pose.y(), pose.z(),
|
||||||
|
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||||
|
gps(0), gps(1), gps(2));
|
||||||
|
}
|
||||||
|
|
||||||
|
fclose(fp_out);
|
||||||
|
}
|
|
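A note on the preintegration parameters above: `MakeSharedU(g)` assumes a Z-up navigation frame (gravity along negative Z). The sketch below is illustrative rather than part of the example; it shows the alternative constructor I believe GTSAM provides for a Z-down (NED-style) frame, with an assumed gravity magnitude.

```cpp
#include <gtsam/navigation/ImuFactor.h>

int main() {
  using namespace gtsam;
  const double g = 9.81;  // assumed gravity magnitude
  // Z-up navigation frame (as in the example above): n_gravity = (0, 0, -g).
  auto paramsUp = PreintegratedImuMeasurements::Params::MakeSharedU(g);
  // Z-down (NED-style) frame: n_gravity = (0, 0, +g).
  auto paramsDown = PreintegratedImuMeasurements::Params::MakeSharedD(g);
  return 0;
}
```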
@@ -35,22 +35,28 @@
  * optional arguments:
  * data_csv_path path to the CSV file with the IMU data.
  * -c use CombinedImuFactor
+ * Note: Define USE_LM to use Levenberg Marquardt Optimizer
+ * By default ISAM2 is used
  */

 // GTSAM related includes.
-#include <gtsam/inference/Symbol.h>
 #include <gtsam/navigation/CombinedImuFactor.h>
 #include <gtsam/navigation/GPSFactor.h>
 #include <gtsam/navigation/ImuFactor.h>
-#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
-#include <gtsam/nonlinear/NonlinearFactorGraph.h>
 #include <gtsam/slam/BetweenFactor.h>
 #include <gtsam/slam/dataset.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/nonlinear/ISAM2.h>
+#include <gtsam/inference/Symbol.h>

 #include <cstring>
 #include <fstream>
 #include <iostream>

+// Uncomment the following to use Levenberg Marquardt Optimizer
+// #define USE_LM

 using namespace gtsam;
 using namespace std;
@@ -64,6 +70,17 @@ static const char use_combined_imu_flag[3] = "-c";
 int main(int argc, char* argv[]) {
   string data_filename;
   bool use_combined_imu = false;

+#ifndef USE_LM
+  printf("Using ISAM2\n");
+  ISAM2Params parameters;
+  parameters.relinearizeThreshold = 0.01;
+  parameters.relinearizeSkip = 1;
+  ISAM2 isam2(parameters);
+#else
+  printf("Using Levenberg Marquardt Optimizer\n");
+#endif
+
   if (argc < 2) {
     printf("using default CSV file\n");
     data_filename = findExampleDataFile("imuAndGPSdata.csv");
@@ -252,9 +269,19 @@ int main(int argc, char* argv[]) {
     initial_values.insert(V(correction_count), prop_state.v());
     initial_values.insert(B(correction_count), prev_bias);

+    Values result;
+#ifdef USE_LM
     LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
-    Values result = optimizer.optimize();
+    result = optimizer.optimize();
+#else
+    isam2.update(*graph, initial_values);
+    isam2.update();
+    result = isam2.calculateEstimate();
+
+    // reset the graph
+    graph->resize(0);
+    initial_values.clear();
+#endif
     // Overwrite the beginning of the preintegration for the next step.
     prev_state = NavState(result.at<Pose3>(X(correction_count)),
                           result.at<Vector3>(V(correction_count)));
@@ -85,7 +85,7 @@ class UnaryFactor: public NoiseModelFactor1<Pose2> {
   // function, returning a vector of errors when evaluated at the provided variable value. It
   // must also calculate the Jacobians for this measurement function, if requested.
   Vector evaluateError(const Pose2& q,
-                       boost::optional<Matrix&> H = boost::none) const {
+                       boost::optional<Matrix&> H = boost::none) const override {
     // The measurement function for a GPS-like measurement is simple:
     // error_x = pose.x - measurement.x
     // error_y = pose.y - measurement.y

@@ -99,7 +99,7 @@ class UnaryFactor: public NoiseModelFactor1<Pose2> {
   // The second is a 'clone' function that allows the factor to be copied. Under most
   // circumstances, the following code that employs the default copy constructor should
   // work fine.
-  virtual gtsam::NonlinearFactor::shared_ptr clone() const {
+  gtsam::NonlinearFactor::shared_ptr clone() const override {
     return boost::static_pointer_cast<gtsam::NonlinearFactor>(
         gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this))); }
@@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
                      Symbol('x', i), corrupted_pose);
   }
   for (size_t j = 0; j < points.size(); ++j) {
-    auto corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
+    Point3 corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
     initialEstimate.insert<Point3>(Symbol('l', j), corrupted_point);
   }
   initialEstimate.print("Initial Estimates:\n");
@@ -50,11 +50,11 @@
 #include <boost/program_options.hpp>
 #include <boost/range/algorithm/set_algorithm.hpp>
 #include <boost/range/adaptor/reversed.hpp>
-#include <boost/random.hpp>
 #include <boost/serialization/export.hpp>

 #include <fstream>
 #include <iostream>
+#include <random>

 #ifdef GTSAM_USE_TBB
 #include <tbb/task_arena.h> // tbb::task_arena

@@ -554,8 +554,8 @@ void runCompare()
 void runPerturb()
 {
   // Set up random number generator
-  boost::mt19937 rng;
-  boost::normal_distribution<double> normal(0.0, perturbationNoise);
+  std::mt19937 rng;
+  std::normal_distribution<double> normal(0.0, perturbationNoise);

   // Perturb values
   VectorValues noise;
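For reference, a minimal standalone sketch of the standard-library `<random>` facilities that replace `boost::random` above; the seed and sigma values are illustrative only.

```cpp
#include <iostream>
#include <random>

int main() {
  std::mt19937 rng(42);                                // deterministic seed for reproducibility
  std::normal_distribution<double> normal(0.0, 0.01);  // zero-mean Gaussian perturbation noise
  std::cout << "sample: " << normal(rng) << std::endl;
  return 0;
}
```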
@@ -10,7 +10,7 @@
 * -------------------------------------------------------------------------- */

 /**
- * @file small.cpp
+ * @file UGM_chain.cpp
 * @brief UGM (undirected graphical model) examples: chain
 * @author Frank Dellaert
 * @author Abhijit Kundu
@@ -19,10 +19,9 @@
 * for more explanation. This code demos the same example using GTSAM.
 */

-#include <gtsam/discrete/DiscreteFactorGraph.h>
-#include <gtsam/discrete/DiscreteSequentialSolver.h>
-#include <gtsam/discrete/DiscreteMarginals.h>
 #include <gtsam/base/timing.h>
+#include <gtsam/discrete/DiscreteFactorGraph.h>
+#include <gtsam/discrete/DiscreteMarginals.h>

 #include <iomanip>
@@ -30,9 +29,8 @@ using namespace std;
 using namespace gtsam;

 int main(int argc, char** argv) {
-
   // Set Number of Nodes in the Graph
   const int nrNodes = 60;

   // Each node takes 1 of 7 possible states denoted by 0-6 in following order:
   // ["VideoGames" "Industry" "GradSchool" "VideoGames(with PhD)"
@@ -40,70 +38,55 @@ int main(int argc, char** argv) {
   const size_t nrStates = 7;

   // define variables
   vector<DiscreteKey> nodes;
-  for (int i = 0; i < nrNodes; i++){
+  for (int i = 0; i < nrNodes; i++) {
     DiscreteKey dk(i, nrStates);
     nodes.push_back(dk);
   }

   // create graph
   DiscreteFactorGraph graph;

   // add node potentials
   graph.add(nodes[0], ".3 .6 .1 0 0 0 0");
-  for (int i = 1; i < nrNodes; i++)
-    graph.add(nodes[i], "1 1 1 1 1 1 1");
+  for (int i = 1; i < nrNodes; i++) graph.add(nodes[i], "1 1 1 1 1 1 1");

-  const std::string edgePotential = ".08 .9 .01 0 0 0 .01 "
-      ".03 .95 .01 0 0 0 .01 "
-      ".06 .06 .75 .05 .05 .02 .01 "
-      "0 0 0 .3 .6 .09 .01 "
-      "0 0 0 .02 .95 .02 .01 "
-      "0 0 0 .01 .01 .97 .01 "
-      "0 0 0 0 0 0 1";
+  const std::string edgePotential =
+      ".08 .9 .01 0 0 0 .01 "
+      ".03 .95 .01 0 0 0 .01 "
+      ".06 .06 .75 .05 .05 .02 .01 "
+      "0 0 0 .3 .6 .09 .01 "
+      "0 0 0 .02 .95 .02 .01 "
+      "0 0 0 .01 .01 .97 .01 "
+      "0 0 0 0 0 0 1";

   // add edge potentials
   for (int i = 0; i < nrNodes - 1; i++)
     graph.add(nodes[i] & nodes[i + 1], edgePotential);

   cout << "Created Factor Graph with " << nrNodes << " variable nodes and "
        << graph.size() << " factors (Unary+Edge).";

   // "Decoding", i.e., configuration with largest value
   // We use sequential variable elimination
-  DiscreteSequentialSolver solver(graph);
-  DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
+  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
+  DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
   optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");

   // "Inference" Computing marginals for each node
-  cout << "\nComputing Node Marginals ..(Sequential Elimination)" << endl;
-  gttic_(Sequential);
-  for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
-      ++itr) {
-    //Compute the marginal
-    Vector margProbs = solver.marginalProbabilities(*itr);
-
-    //Print the marginals
-    cout << "Node#" << setw(4) << itr->first << " : ";
-    print(margProbs);
-  }
-  gttoc_(Sequential);

   // Here we'll make use of DiscreteMarginals class, which makes use of
   // bayes-tree based shortcut evaluation of marginals
   DiscreteMarginals marginals(graph);

   cout << "\nComputing Node Marginals ..(BayesTree based)" << endl;
   gttic_(Multifrontal);
-  for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
-      ++itr) {
-    //Compute the marginal
-    Vector margProbs = marginals.marginalProbabilities(*itr);
-
-    //Print the marginals
-    cout << "Node#" << setw(4) << itr->first << " : ";
+  for (vector<DiscreteKey>::iterator it = nodes.begin(); it != nodes.end();
+      ++it) {
+    // Compute the marginal
+    Vector margProbs = marginals.marginalProbabilities(*it);
+
+    // Print the marginals
+    cout << "Node#" << setw(4) << it->first << " : ";
     print(margProbs);
   }
   gttoc_(Multifrontal);
@@ -111,4 +94,3 @@ int main(int argc, char** argv) {
   tictoc_print_();
   return 0;
 }
-
@@ -10,15 +10,16 @@
 * -------------------------------------------------------------------------- */

 /**
- * @file small.cpp
+ * @file UGM_small.cpp
 * @brief UGM (undirected graphical model) examples: small
 * @author Frank Dellaert
 *
 * See http://www.di.ens.fr/~mschmidt/Software/UGM/small.html
 */

+#include <gtsam/base/Vector.h>
 #include <gtsam/discrete/DiscreteFactorGraph.h>
-#include <gtsam/discrete/DiscreteSequentialSolver.h>
+#include <gtsam/discrete/DiscreteMarginals.h>

 using namespace std;
 using namespace gtsam;
@@ -61,24 +62,24 @@ int main(int argc, char** argv) {

   // "Decoding", i.e., configuration with largest value (MPE)
   // We use sequential variable elimination
-  DiscreteSequentialSolver solver(graph);
-  DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
+  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
+  DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
   optimalDecoding->print("\noptimalDecoding");

   // "Inference" Computing marginals
   cout << "\nComputing Node Marginals .." << endl;
-  Vector margProbs;
+  DiscreteMarginals marginals(graph);

-  margProbs = solver.marginalProbabilities(Cathy);
+  Vector margProbs = marginals.marginalProbabilities(Cathy);
   print(margProbs, "Cathy's Node Marginal:");

-  margProbs = solver.marginalProbabilities(Heather);
+  margProbs = marginals.marginalProbabilities(Heather);
   print(margProbs, "Heather's Node Marginal");

-  margProbs = solver.marginalProbabilities(Mark);
+  margProbs = marginals.marginalProbabilities(Mark);
   print(margProbs, "Mark's Node Marginal");

-  margProbs = solver.marginalProbabilities(Allison);
+  margProbs = marginals.marginalProbabilities(Allison);
   print(margProbs, "Allison's Node Marginal");

   return 0;
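To make the new discrete-inference API concrete, here is a minimal standalone sketch (a tiny two-variable model invented for illustration) that mirrors the pattern both UGM examples now use: sequential elimination for the most probable explanation, and `DiscreteMarginals` for per-variable marginals.

```cpp
#include <gtsam/base/Vector.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>

using namespace gtsam;

int main() {
  // Two binary variables with a unary prior on A and a pairwise potential.
  DiscreteKey A(0, 2), B(1, 2);
  DiscreteFactorGraph graph;
  graph.add(A, "9 1");
  graph.add(A & B, "9 1 1 9");

  // MPE via sequential elimination.
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  mpe->print("MPE");

  // Marginals via the Bayes-tree based DiscreteMarginals helper.
  DiscreteMarginals marginals(graph);
  Vector pA = marginals.marginalProbabilities(A);
  print(pA, "P(A)");
  return 0;
}
```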
gtsam.h
@@ -598,6 +598,7 @@ class SOn {
   // Standard Constructors
   SOn(size_t n);
   static gtsam::SOn FromMatrix(Matrix R);
+  static gtsam::SOn Lift(size_t n, Matrix R);

   // Testable
   void print(string s) const;
@@ -1458,7 +1459,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Fair: gtsam::noiseModel::mEstimator::Base {

@@ -1469,7 +1470,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Huber: gtsam::noiseModel::mEstimator::Base {

@@ -1480,7 +1481,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {

@@ -1491,7 +1492,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Tukey: gtsam::noiseModel::mEstimator::Base {

@@ -1502,7 +1503,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Welsch: gtsam::noiseModel::mEstimator::Base {

@@ -1513,7 +1514,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {

@@ -1524,7 +1525,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class DCS: gtsam::noiseModel::mEstimator::Base {

@@ -1535,7 +1536,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {

@@ -1546,7 +1547,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 }///\namespace mEstimator
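The rename from `residual` to `loss` only changes the wrapped method name; in C++ the m-estimators are typically used through `noiseModel::Robust`. A minimal sketch follows (the Huber threshold and sigma values are illustrative, not taken from this change):

```cpp
#include <gtsam/linear/NoiseModel.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Huber m-estimator with threshold k = 1.345, wrapped around an isotropic Gaussian.
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  auto gaussian = noiseModel::Isotropic::Sigma(3, 0.1);
  auto robust = noiseModel::Robust::Create(huber, gaussian);

  // Per-residual weight and (renamed) loss of the m-estimator itself.
  std::cout << "weight(2.0) = " << huber->weight(2.0) << std::endl;
  std::cout << "loss(2.0)   = " << huber->loss(2.0) << std::endl;
  (void)robust;
  return 0;
}
```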
@@ -1937,6 +1938,22 @@ virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   void print() const;
 };

+#include <gtsam/linear/Preconditioner.h>
+virtual class PreconditionerParameters {
+  PreconditionerParameters();
+};
+
+virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters {
+  DummyPreconditionerParameters();
+};
+
+#include <gtsam/linear/PCGSolver.h>
+virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
+  PCGSolverParameters();
+  void print(string s);
+  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+};
+
 #include <gtsam/linear/SubgraphSolver.h>
 virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
   SubgraphSolverParameters();
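These wrapper additions expose the preconditioned conjugate-gradient parameters. A rough C++ sketch of how they might be wired into an optimizer is below; the `preconditioner_` member and the use of `DummyPreconditionerParameters` are assumptions based on the declarations above, not verified against this exact revision.

```cpp
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <boost/make_shared.hpp>

using namespace gtsam;

LevenbergMarquardtParams makePcgParams() {
  LevenbergMarquardtParams params;
  // Ask the nonlinear optimizer to solve its linear systems iteratively.
  params.linearSolverType = NonlinearOptimizerParams::Iterative;

  auto pcg = boost::make_shared<PCGSolverParameters>();
  // Assumed member holding the preconditioner parameters (shared_ptr<PreconditionerParameters>).
  pcg->preconditioner_ = boost::make_shared<DummyPreconditionerParameters>();
  params.iterativeParams = pcg;
  return params;
}
```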
@@ -2835,6 +2852,34 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
   KarcherMeanFactor(const gtsam::KeyVector& keys);
 };

+#include <gtsam/slam/FrobeniusFactor.h>
+gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
+    gtsam::noiseModel::Base* model, size_t d);
+
+template<T = {gtsam::SO3, gtsam::SO4}>
+virtual class FrobeniusFactor : gtsam::NoiseModelFactor {
+  FrobeniusFactor(size_t key1, size_t key2);
+  FrobeniusFactor(size_t key1, size_t key2, gtsam::noiseModel::Base* model);
+
+  Vector evaluateError(const T& R1, const T& R2);
+};
+
+template<T = {gtsam::SO3, gtsam::SO4}>
+virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
+  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12);
+  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12, gtsam::noiseModel::Base* model);
+
+  Vector evaluateError(const T& R1, const T& R2);
+};
+
+virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
+  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
+                          size_t p);
+  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
+                          size_t p, gtsam::noiseModel::Base* model);
+  Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2);
+};
+
 //*************************************************************************
 // Navigation
 //*************************************************************************
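A rough C++ sketch of the newly wrapped between-factor; the keys and the relative rotation are invented for illustration, and the constructor is assumed to follow the declarations above (in C++ the noise model is optional).

```cpp
#include <gtsam/geometry/SO3.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;
  // Assumed measurement: relative rotation between SO(3) variables with keys 1 and 2.
  SO3 R12 = SO3::Expmap(Vector3(0.1, 0.0, 0.0));
  graph.emplace_shared<FrobeniusBetweenFactor<SO3>>(1, 2, R12);
  graph.print("graph with one FrobeniusBetweenFactor<SO3>\n");
  return 0;
}
```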
@@ -15,7 +15,7 @@ set (gtsam_subdirs
     sfm
     slam
-    smart
     navigation
 )

 set(gtsam_srcs)
|
||||||
list(APPEND GTSAM_EXPORTED_TARGETS gtsam)
|
list(APPEND GTSAM_EXPORTED_TARGETS gtsam)
|
||||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
||||||
|
|
||||||
# make sure that ccolamd compiles even in face of warnings
|
# Make sure that ccolamd compiles even in face of warnings
|
||||||
|
# and suppress all warnings from 3rd party code if Release build
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "/w")
|
||||||
else()
|
else()
|
||||||
|
if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
|
||||||
|
# Suppress all warnings from 3rd party sources.
|
||||||
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
|
||||||
|
else()
|
||||||
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
|
set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# Create the matlab toolbox for the gtsam library
|
# Create the matlab toolbox for the gtsam library
|
||||||
|
|
|
@@ -70,7 +70,7 @@ public:
   }

   /// equals implementing generic Value interface
-  virtual bool equals_(const Value& p, double tol = 1e-9) const {
+  bool equals_(const Value& p, double tol = 1e-9) const override {
     // Cast the base class Value pointer to a templated generic class pointer
     const GenericValue& genericValue2 = static_cast<const GenericValue&>(p);
     // Return the result of using the equals traits for the derived class

@@ -83,7 +83,7 @@ public:
   }

   /// Virtual print function, uses traits
-  virtual void print(const std::string& str) const {
+  void print(const std::string& str) const override {
     std::cout << "(" << demangle(typeid(T).name()) << ") ";
     traits<T>::Print(value_, str);
   }

@@ -91,7 +91,7 @@ public:
   /**
    * Create a duplicate object returned as a pointer to the generic Value interface.
    */
-  virtual Value* clone_() const {
+  Value* clone_() const override {
     GenericValue* ptr = new GenericValue(*this); // calls copy constructor to fill in
     return ptr;
   }

@@ -99,19 +99,19 @@ public:
   /**
    * Destroy and deallocate this object, only if it was originally allocated using clone_().
    */
-  virtual void deallocate_() const {
+  void deallocate_() const override {
     delete this;
   }

   /**
    * Clone this value (normal clone on the heap, delete with 'delete' operator)
    */
-  virtual boost::shared_ptr<Value> clone() const {
+  boost::shared_ptr<Value> clone() const override {
     return boost::allocate_shared<GenericValue>(Eigen::aligned_allocator<GenericValue>(), *this);
   }

   /// Generic Value interface version of retract
-  virtual Value* retract_(const Vector& delta) const {
+  Value* retract_(const Vector& delta) const override {
     // Call retract on the derived class using the retract trait function
     const T retractResult = traits<T>::Retract(GenericValue<T>::value(), delta);

@@ -122,7 +122,7 @@ public:
   }

   /// Generic Value interface version of localCoordinates
-  virtual Vector localCoordinates_(const Value& value2) const {
+  Vector localCoordinates_(const Value& value2) const override {
     // Cast the base class Value pointer to a templated generic class pointer
     const GenericValue<T>& genericValue2 =
         static_cast<const GenericValue<T>&>(value2);

@@ -142,12 +142,12 @@ public:
   }

   /// Return run-time dimensionality
-  virtual size_t dim() const {
+  size_t dim() const override {
     return traits<T>::GetDimension(value_);
   }

   /// Assignment operator
-  virtual Value& operator=(const Value& rhs) {
+  Value& operator=(const Value& rhs) override {
     // Cast the base class Value pointer to a derived class pointer
     const GenericValue& derivedRhs = static_cast<const GenericValue&>(rhs);
@@ -548,17 +548,47 @@ GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
 namespace boost {
   namespace serialization {

+    /**
+     * Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
+     *
+     * Eigen supports calling resize() on both static and dynamic matrices.
+     * This allows for a uniform API, with resize having no effect if the static matrix
+     * is already the correct size.
+     * https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
+     *
+     * We use all the Matrix template parameters to ensure wide compatibility.
+     *
+     * eigen_typekit in ROS uses the same code
+     * http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
+     */
+
     // split version - sends sizes ahead
-    template<class Archive>
-    void save(Archive & ar, const gtsam::Matrix & m, unsigned int /*version*/) {
+    template<class Archive,
+             typename Scalar_,
+             int Rows_,
+             int Cols_,
+             int Ops_,
+             int MaxRows_,
+             int MaxCols_>
+    void save(Archive & ar,
+              const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+              const unsigned int /*version*/) {
       const size_t rows = m.rows(), cols = m.cols();
       ar << BOOST_SERIALIZATION_NVP(rows);
       ar << BOOST_SERIALIZATION_NVP(cols);
       ar << make_nvp("data", make_array(m.data(), m.size()));
     }

-    template<class Archive>
-    void load(Archive & ar, gtsam::Matrix & m, unsigned int /*version*/) {
+    template<class Archive,
+             typename Scalar_,
+             int Rows_,
+             int Cols_,
+             int Ops_,
+             int MaxRows_,
+             int MaxCols_>
+    void load(Archive & ar,
+              Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+              const unsigned int /*version*/) {
       size_t rows, cols;
       ar >> BOOST_SERIALIZATION_NVP(rows);
       ar >> BOOST_SERIALIZATION_NVP(cols);

@@ -566,8 +596,25 @@ namespace boost {
       ar >> make_nvp("data", make_array(m.data(), m.size()));
     }

+    // templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
+    template<class Archive,
+             typename Scalar_,
+             int Rows_,
+             int Cols_,
+             int Ops_,
+             int MaxRows_,
+             int MaxCols_>
+    void serialize(Archive & ar,
+                   Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+                   const unsigned int version) {
+      split_free(ar, m, version);
+    }
+
+    // specialized to Matrix for MATLAB wrapper
+    template <class Archive>
+    void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
+      split_free(ar, m, version);
+    }
+
   } // namespace serialization
 } // namespace boost

-BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Matrix);
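With the templated free functions above, fixed-size Eigen matrices should round-trip through Boost.Serialization directly, not just the dynamic `gtsam::Matrix`. A small sketch under that assumption (the archive type is arbitrary):

```cpp
#include <gtsam/base/Matrix.h>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <sstream>

int main() {
  const gtsam::Matrix3 M = gtsam::Matrix3::Identity();  // fixed-size 3x3 matrix

  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << M;  // dispatches to the templated save() above
  }

  gtsam::Matrix3 N;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> N;  // templated load(); resize() is a no-op for fixed sizes
  }
  return 0;
}
```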
@@ -71,12 +71,12 @@ protected:
       String(description.begin(), description.end())) {
   }

-  /// Default destructor doesn't have the throw()
-  virtual ~ThreadsafeException() throw () {
+  /// Default destructor doesn't have the noexcept
+  virtual ~ThreadsafeException() noexcept {
   }

 public:
-  virtual const char* what() const throw () {
+  const char* what() const noexcept override {
     return description_ ? description_->c_str() : "";
   }
 };

@@ -113,8 +113,8 @@ public:
 class CholeskyFailed : public gtsam::ThreadsafeException<CholeskyFailed>
 {
 public:
-  CholeskyFailed() throw() {}
-  virtual ~CholeskyFailed() throw() {}
+  CholeskyFailed() noexcept {}
+  virtual ~CholeskyFailed() noexcept {}
 };

 } // namespace gtsam
@@ -89,7 +89,7 @@ void TimingOutline::print(const std::string& outline) const {
     childOrder[child.second->myOrder_] = child.second;
   }
   // Print children
-  for(const ChildOrder::value_type order_child: childOrder) {
+  for(const ChildOrder::value_type& order_child: childOrder) {
     std::string childOutline(outline);
     childOutline += "|   ";
     order_child.second->print(childOutline);
@@ -57,7 +57,7 @@ namespace gtsam {
         makeNewTasks(makeNewTasks),
         isPostOrderPhase(false) {}

-      tbb::task* execute()
+      tbb::task* execute() override
       {
         if(isPostOrderPhase)
         {

@@ -144,7 +144,7 @@ namespace gtsam {
         roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
         problemSizeThreshold(problemSizeThreshold) {}

-      tbb::task* execute()
+      tbb::task* execute() override
       {
         typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
         // Create data and tasks for our children
@@ -28,6 +28,7 @@
 #include <cstdint>

 #include <exception>
+#include <string>

 #ifdef GTSAM_USE_TBB
 #include <tbb/scalable_allocator.h>

@@ -54,7 +55,7 @@
 namespace gtsam {

   /// Function to demangle type name of variable, e.g. demangle(typeid(x).name())
-  std::string demangle(const char* name);
+  std::string GTSAM_EXPORT demangle(const char* name);

   /// Integer nonlinear key type
   typedef std::uint64_t Key;
@@ -66,42 +66,42 @@ namespace gtsam {
      }

      /// Leaf-Leaf equality
-     bool sameLeaf(const Leaf& q) const {
+     bool sameLeaf(const Leaf& q) const override {
        return constant_ == q.constant_;
      }

      /// polymorphic equality: is q is a leaf, could be
-     bool sameLeaf(const Node& q) const {
+     bool sameLeaf(const Node& q) const override {
        return (q.isLeaf() && q.sameLeaf(*this));
      }

      /** equality up to tolerance */
-     bool equals(const Node& q, double tol) const {
+     bool equals(const Node& q, double tol) const override {
        const Leaf* other = dynamic_cast<const Leaf*> (&q);
        if (!other) return false;
        return std::abs(double(this->constant_ - other->constant_)) < tol;
      }

      /** print */
-     void print(const std::string& s) const {
+     void print(const std::string& s) const override {
        bool showZero = true;
        if (showZero || constant_) std::cout << s << " Leaf " << constant_ << std::endl;
      }

      /** to graphviz file */
-     void dot(std::ostream& os, bool showZero) const {
+     void dot(std::ostream& os, bool showZero) const override {
        if (showZero || constant_) os << "\"" << this->id() << "\" [label=\""
            << boost::format("%4.2g") % constant_
            << "\", shape=box, rank=sink, height=0.35, fixedsize=true]\n"; // width=0.55,
      }

      /** evaluate */
-     const Y& operator()(const Assignment<L>& x) const {
+     const Y& operator()(const Assignment<L>& x) const override {
        return constant_;
      }

      /** apply unary operator */
-     NodePtr apply(const Unary& op) const {
+     NodePtr apply(const Unary& op) const override {
        NodePtr f(new Leaf(op(constant_)));
        return f;
      }

@@ -111,27 +111,27 @@ namespace gtsam {
      // Simply calls apply on argument to call correct virtual method:
      // fL.apply_f_op_g(gL) -> gL.apply_g_op_fL(fL) (below)
      // fL.apply_f_op_g(gC) -> gC.apply_g_op_fL(fL) (Choice)
-     NodePtr apply_f_op_g(const Node& g, const Binary& op) const {
+     NodePtr apply_f_op_g(const Node& g, const Binary& op) const override {
        return g.apply_g_op_fL(*this, op);
      }

      // Applying binary operator to two leaves results in a leaf
-     NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const {
+     NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
        NodePtr h(new Leaf(op(fL.constant_, constant_))); // fL op gL
        return h;
      }

      // If second argument is a Choice node, call it's apply with leaf as second
-     NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const {
+     NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const override {
        return fC.apply_fC_op_gL(*this, op); // operand order back to normal
      }

      /** choose a branch, create new memory ! */
-     NodePtr choose(const L& label, size_t index) const {
+     NodePtr choose(const L& label, size_t index) const override {
        return NodePtr(new Leaf(constant()));
      }

-     bool isLeaf() const { return true; }
+     bool isLeaf() const override { return true; }

    }; // Leaf

@@ -175,7 +175,7 @@ namespace gtsam {
        return f;
      }

-     bool isLeaf() const { return false; }
+     bool isLeaf() const override { return false; }

      /** Constructor, given choice label and mandatory expected branch count */
      Choice(const L& label, size_t count) :

@@ -236,7 +236,7 @@ namespace gtsam {
      }

      /** print (as a tree) */
-     void print(const std::string& s) const {
+     void print(const std::string& s) const override {
        std::cout << s << " Choice(";
        //        std::cout << this << ",";
        std::cout << label_ << ") " << std::endl;

@@ -245,7 +245,7 @@ namespace gtsam {
      }

      /** output to graphviz (as a a graph) */
-     void dot(std::ostream& os, bool showZero) const {
+     void dot(std::ostream& os, bool showZero) const override {
        os << "\"" << this->id() << "\" [shape=circle, label=\"" << label_
           << "\"]\n";
        for (size_t i = 0; i < branches_.size(); i++) {

@@ -266,17 +266,17 @@ namespace gtsam {
      }

      /// Choice-Leaf equality: always false
-     bool sameLeaf(const Leaf& q) const {
+     bool sameLeaf(const Leaf& q) const override {
        return false;
      }

      /// polymorphic equality: if q is a leaf, could be...
-     bool sameLeaf(const Node& q) const {
+     bool sameLeaf(const Node& q) const override {
        return (q.isLeaf() && q.sameLeaf(*this));
      }

      /** equality up to tolerance */
-     bool equals(const Node& q, double tol) const {
+     bool equals(const Node& q, double tol) const override {
        const Choice* other = dynamic_cast<const Choice*> (&q);
        if (!other) return false;
        if (this->label_ != other->label_) return false;

@@ -288,7 +288,7 @@ namespace gtsam {
      }

      /** evaluate */
-     const Y& operator()(const Assignment<L>& x) const {
+     const Y& operator()(const Assignment<L>& x) const override {
 #ifndef NDEBUG
        typename Assignment<L>::const_iterator it = x.find(label_);
        if (it == x.end()) {

@@ -314,7 +314,7 @@ namespace gtsam {
      }

      /** apply unary operator */
-     NodePtr apply(const Unary& op) const {
+     NodePtr apply(const Unary& op) const override {
        boost::shared_ptr<Choice> r(new Choice(label_, *this, op));
        return Unique(r);
      }

@@ -324,12 +324,12 @@ namespace gtsam {
      // Simply calls apply on argument to call correct virtual method:
      // fC.apply_f_op_g(gL) -> gL.apply_g_op_fC(fC) -> (Leaf)
      // fC.apply_f_op_g(gC) -> gC.apply_g_op_fC(fC) -> (below)
-     NodePtr apply_f_op_g(const Node& g, const Binary& op) const {
+     NodePtr apply_f_op_g(const Node& g, const Binary& op) const override {
        return g.apply_g_op_fC(*this, op);
      }

      // If second argument of binary op is Leaf node, recurse on branches
-     NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const {
+     NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
        boost::shared_ptr<Choice> h(new Choice(label(), nrChoices()));
        for(NodePtr branch: branches_)
          h->push_back(fL.apply_f_op_g(*branch, op));

@@ -337,7 +337,7 @@ namespace gtsam {
      }

      // If second argument of binary op is Choice, call constructor
-     NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const {
+     NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const override {
        boost::shared_ptr<Choice> h(new Choice(fC, *this, op));
        return Unique(h);
      }

@@ -352,7 +352,7 @@ namespace gtsam {
      }

      /** choose a branch, recursively */
-     NodePtr choose(const L& label, size_t index) const {
+     NodePtr choose(const L& label, size_t index) const override {
        if (label_ == label)
          return branches_[index]; // choose branch
@@ -69,7 +69,7 @@ namespace gtsam {
     for(Key j: f.keys()) cs[j] = f.cardinality(j);
     // Convert map into keys
     DiscreteKeys keys;
-    for(const DiscreteKey& key: cs)
+    for(const std::pair<const Key,size_t>& key: cs)
       keys.push_back(key);
     // apply operand
     ADT result = ADT::apply(f, op);
@@ -69,23 +69,23 @@ namespace gtsam {
     /// @{

     /// equality
-    bool equals(const DiscreteFactor& other, double tol = 1e-9) const;
+    bool equals(const DiscreteFactor& other, double tol = 1e-9) const override;

     // print
-    virtual void print(const std::string& s = "DecisionTreeFactor:\n",
-        const KeyFormatter& formatter = DefaultKeyFormatter) const;
+    void print(const std::string& s = "DecisionTreeFactor:\n",
+        const KeyFormatter& formatter = DefaultKeyFormatter) const override;

     /// @}
     /// @name Standard Interface
     /// @{

     /// Value is just look up in AlgebraicDecisonTree
-    virtual double operator()(const Values& values) const {
+    double operator()(const Values& values) const override {
       return Potentials::operator()(values);
     }

     /// multiply two factors
-    DecisionTreeFactor operator*(const DecisionTreeFactor& f) const {
+    DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override {
       return apply(f, ADT::Ring::mul);
     }

@@ -95,7 +95,7 @@ namespace gtsam {
     }

     /// Convert into a decisiontree
-    virtual DecisionTreeFactor toDecisionTreeFactor() const {
+    DecisionTreeFactor toDecisionTreeFactor() const override {
       return *this;
     }
@@ -20,13 +20,14 @@
 #include <vector>
 #include <map>
 #include <boost/shared_ptr.hpp>
+#include <gtsam/inference/BayesNet.h>
 #include <gtsam/inference/FactorGraph.h>
 #include <gtsam/discrete/DiscreteConditional.h>

 namespace gtsam {

   /** A Bayes net made from linear-Discrete densities */
-  class GTSAM_EXPORT DiscreteBayesNet: public FactorGraph<DiscreteConditional>
+  class GTSAM_EXPORT DiscreteBayesNet: public BayesNet<DiscreteConditional>
   {
   public:
@ -29,13 +29,32 @@ namespace gtsam {
|
||||||
template class BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>;
|
template class BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>;
|
||||||
template class BayesTree<DiscreteBayesTreeClique>;
|
template class BayesTree<DiscreteBayesTreeClique>;
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
double DiscreteBayesTreeClique::evaluate(
|
||||||
|
const DiscreteConditional::Values& values) const {
|
||||||
|
// evaluate all conditionals and multiply
|
||||||
|
double result = (*conditional_)(values);
|
||||||
|
for (const auto& child : children) {
|
||||||
|
result *= child->evaluate(values);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
bool DiscreteBayesTree::equals(const This& other, double tol) const
|
bool DiscreteBayesTree::equals(const This& other, double tol) const {
|
||||||
{
|
|
||||||
return Base::equals(other, tol);
|
return Base::equals(other, tol);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
double DiscreteBayesTree::evaluate(
|
||||||
|
const DiscreteConditional::Values& values) const {
|
||||||
|
double result = 1.0;
|
||||||
|
for (const auto& root : roots_) {
|
||||||
|
result *= root->evaluate(values);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
} // \namespace gtsam
|
} // \namespace gtsam
|
||||||
|
|
||||||
|
|
||||||
|
|
|
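The two evaluate() methods added above compute a joint probability as a product of conditionals: each clique multiplies its own conditional by the recursive evaluation of its children, and the tree multiplies over its roots. A stand-alone sketch of that recursion with a hypothetical Node type (not the GTSAM clique classes):

#include <memory>
#include <vector>

struct Node {
  double conditionalValue;  // value of P(frontals | separator) at this clique
  std::vector<std::shared_ptr<Node>> children;

  // Product of this clique's conditional and all descendants' conditionals.
  double evaluate() const {
    double result = conditionalValue;
    for (const auto& child : children) result *= child->evaluate();
    return result;
  }
};

// Tree-level evaluation: multiply over all roots (the tree may be a forest).
double evaluateTree(const std::vector<std::shared_ptr<Node>>& roots) {
  double result = 1.0;
  for (const auto& root : roots) result *= root->evaluate();
  return result;
}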
@ -11,7 +11,8 @@
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file DiscreteBayesTree.h
|
* @file DiscreteBayesTree.h
|
||||||
* @brief Discrete Bayes Tree, the result of eliminating a DiscreteJunctionTree
|
* @brief Discrete Bayes Tree, the result of eliminating a
|
||||||
|
* DiscreteJunctionTree
|
||||||
* @brief DiscreteBayesTree
|
* @brief DiscreteBayesTree
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
* @author Richard Roberts
|
* @author Richard Roberts
|
||||||
|
@ -22,45 +23,63 @@
|
||||||
#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/inference/BayesTree.h>
|
#include <gtsam/inference/BayesTree.h>
|
||||||
|
#include <gtsam/inference/Conditional.h>
|
||||||
#include <gtsam/inference/BayesTreeCliqueBase.h>
|
#include <gtsam/inference/BayesTreeCliqueBase.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
// Forward declarations
|
// Forward declarations
|
||||||
class DiscreteConditional;
|
class DiscreteConditional;
|
||||||
class VectorValues;
|
class VectorValues;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
/** A clique in a DiscreteBayesTree */
|
/** A clique in a DiscreteBayesTree */
|
||||||
class GTSAM_EXPORT DiscreteBayesTreeClique :
|
class GTSAM_EXPORT DiscreteBayesTreeClique
|
||||||
public BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>
|
: public BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph> {
|
||||||
{
|
public:
|
||||||
public:
|
typedef DiscreteBayesTreeClique This;
|
||||||
typedef DiscreteBayesTreeClique This;
|
typedef BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>
|
||||||
typedef BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph> Base;
|
Base;
|
||||||
typedef boost::shared_ptr<This> shared_ptr;
|
typedef boost::shared_ptr<This> shared_ptr;
|
||||||
typedef boost::weak_ptr<This> weak_ptr;
|
typedef boost::weak_ptr<This> weak_ptr;
|
||||||
DiscreteBayesTreeClique() {}
|
DiscreteBayesTreeClique() {}
|
||||||
DiscreteBayesTreeClique(const boost::shared_ptr<DiscreteConditional>& conditional) : Base(conditional) {}
|
virtual ~DiscreteBayesTreeClique() {}
|
||||||
};
|
DiscreteBayesTreeClique(
|
||||||
|
const boost::shared_ptr<DiscreteConditional>& conditional)
|
||||||
|
: Base(conditional) {}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/// print index signature only
|
||||||
/** A Bayes tree representing a Discrete density */
|
void printSignature(
|
||||||
class GTSAM_EXPORT DiscreteBayesTree :
|
const std::string& s = "Clique: ",
|
||||||
public BayesTree<DiscreteBayesTreeClique>
|
const KeyFormatter& formatter = DefaultKeyFormatter) const {
|
||||||
{
|
conditional_->printSignature(s, formatter);
|
||||||
private:
|
}
|
||||||
typedef BayesTree<DiscreteBayesTreeClique> Base;
|
|
||||||
|
|
||||||
public:
|
/** evaluate conditional probability of subtree for given Values */
|
||||||
typedef DiscreteBayesTree This;
|
double evaluate(const DiscreteConditional::Values& values) const;
|
||||||
typedef boost::shared_ptr<This> shared_ptr;
|
};
|
||||||
|
|
||||||
/** Default constructor, creates an empty Bayes tree */
|
/* ************************************************************************* */
|
||||||
DiscreteBayesTree() {}
|
/** A Bayes tree representing a Discrete density */
|
||||||
|
class GTSAM_EXPORT DiscreteBayesTree
|
||||||
|
: public BayesTree<DiscreteBayesTreeClique> {
|
||||||
|
private:
|
||||||
|
typedef BayesTree<DiscreteBayesTreeClique> Base;
|
||||||
|
|
||||||
/** Check equality */
|
public:
|
||||||
bool equals(const This& other, double tol = 1e-9) const;
|
typedef DiscreteBayesTree This;
|
||||||
};
|
typedef boost::shared_ptr<This> shared_ptr;
|
||||||
|
|
||||||
}
|
/** Default constructor, creates an empty Bayes tree */
|
||||||
|
DiscreteBayesTree() {}
|
||||||
|
|
||||||
|
/** Check equality */
|
||||||
|
bool equals(const This& other, double tol = 1e-9) const;
|
||||||
|
|
||||||
|
/** evaluate probability for given Values */
|
||||||
|
double evaluate(const DiscreteConditional::Values& values) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace gtsam
|
||||||
|
|
|
@ -27,6 +27,7 @@
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <random>
|
#include <random>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
@ -61,16 +62,26 @@ DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
DiscreteConditional::DiscreteConditional(const Signature& signature) :
|
DiscreteConditional::DiscreteConditional(const Signature& signature)
|
||||||
BaseFactor(signature.discreteKeysParentsFirst(), signature.cpt()), BaseConditional(
|
: BaseFactor(signature.discreteKeys(), signature.cpt()),
|
||||||
1) {
|
BaseConditional(1) {}
|
||||||
}
|
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
void DiscreteConditional::print(const std::string& s,
|
void DiscreteConditional::print(const string& s,
|
||||||
const KeyFormatter& formatter) const {
|
const KeyFormatter& formatter) const {
|
||||||
std::cout << s << std::endl;
|
cout << s << " P( ";
|
||||||
Potentials::print(s);
|
for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) {
|
||||||
|
cout << formatter(*it) << " ";
|
||||||
|
}
|
||||||
|
if (nrParents()) {
|
||||||
|
cout << "| ";
|
||||||
|
for (const_iterator it = beginParents(); it != endParents(); ++it) {
|
||||||
|
cout << formatter(*it) << " ";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cout << ")";
|
||||||
|
Potentials::print("");
|
||||||
|
cout << endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
|
@ -173,55 +184,28 @@ size_t DiscreteConditional::solve(const Values& parentsValues) const {
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
size_t DiscreteConditional::sample(const Values& parentsValues) const {
|
size_t DiscreteConditional::sample(const Values& parentsValues) const {
|
||||||
|
static mt19937 rng(2); // random number generator
|
||||||
static mt19937 rng(2); // random number generator
|
|
||||||
|
|
||||||
bool debug = ISDEBUG("DiscreteConditional::sample");
|
|
||||||
|
|
||||||
// Get the correct conditional density
|
// Get the correct conditional density
|
||||||
ADT pFS = choose(parentsValues); // P(F|S=parentsValues)
|
ADT pFS = choose(parentsValues); // P(F|S=parentsValues)
|
||||||
if (debug)
|
|
||||||
GTSAM_PRINT(pFS);
|
|
||||||
|
|
||||||
// get cumulative distribution function (cdf)
|
// TODO(Duy): only works for one key now, seems horribly slow this way
|
||||||
// TODO, only works for one key now, seems horribly slow this way
|
|
||||||
assert(nrFrontals() == 1);
|
assert(nrFrontals() == 1);
|
||||||
Key j = (firstFrontalKey());
|
Key key = firstFrontalKey();
|
||||||
size_t nj = cardinality(j);
|
size_t nj = cardinality(key);
|
||||||
vector<double> cdf(nj);
|
vector<double> p(nj);
|
||||||
Values frontals;
|
Values frontals;
|
||||||
double sum = 0;
|
|
||||||
for (size_t value = 0; value < nj; value++) {
|
for (size_t value = 0; value < nj; value++) {
|
||||||
frontals[j] = value;
|
frontals[key] = value;
|
||||||
double pValueS = pFS(frontals); // P(F=value|S=parentsValues)
|
p[value] = pFS(frontals); // P(F=value|S=parentsValues)
|
||||||
sum += pValueS; // accumulate
|
if (p[value] == 1.0) {
|
||||||
if (debug)
|
return value; // shortcut exit
|
||||||
cout << sum << " ";
|
|
||||||
if (pValueS == 1) {
|
|
||||||
if (debug)
|
|
||||||
cout << "--> " << value << endl;
|
|
||||||
return value; // shortcut exit
|
|
||||||
}
|
}
|
||||||
cdf[value] = sum;
|
|
||||||
}
|
}
|
||||||
|
std::discrete_distribution<size_t> distribution(p.begin(), p.end());
|
||||||
// inspired by http://www.boost.org/doc/libs/1_46_1/doc/html/boost_random/tutorial.html
|
return distribution(rng);
|
||||||
uniform_real_distribution<double> dist(0, cdf.back());
|
|
||||||
size_t sampled = lower_bound(cdf.begin(), cdf.end(), dist(rng)) - cdf.begin();
|
|
||||||
if (debug)
|
|
||||||
cout << "-> " << sampled << endl;
|
|
||||||
|
|
||||||
return sampled;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
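The rewritten sample() above replaces the hand-rolled CDF and uniform draw with std::discrete_distribution, which normalizes the weights internally and handles the cumulative sum. A minimal stand-alone sketch of that standard-library pattern (not the GTSAM member function itself):

#include <cstddef>
#include <random>
#include <vector>

// Draw an index in [0, p.size()) with probability proportional to p[i].
std::size_t sampleCategorical(const std::vector<double>& p, std::mt19937& rng) {
  std::discrete_distribution<std::size_t> distribution(p.begin(), p.end());
  return distribution(rng);
}

// Example: the weights need not sum to one; {3.0, 1.0} returns 0 about 75% of
// the time and 1 about 25% of the time.
// std::mt19937 rng(2);
// std::size_t value = sampleCategorical({3.0, 1.0}, rng);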
|
|
||||||
/* ******************************************************************************** */
|
|
||||||
//void DiscreteConditional::permuteWithInverse(
|
|
||||||
// const Permutation& inversePermutation) {
|
|
||||||
// IndexConditionalOrdered::permuteWithInverse(inversePermutation);
|
|
||||||
// Potentials::permuteWithInverse(inversePermutation);
|
|
||||||
//}
|
|
||||||
/* ******************************************************************************** */
|
/* ******************************************************************************** */
|
||||||
|
|
||||||
}// namespace
|
}// namespace
|
||||||
|
|
|
@ -24,6 +24,8 @@
|
||||||
#include <boost/shared_ptr.hpp>
|
#include <boost/shared_ptr.hpp>
|
||||||
#include <boost/make_shared.hpp>
|
#include <boost/make_shared.hpp>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -83,17 +85,24 @@ public:
|
||||||
|
|
||||||
/// GTSAM-style print
|
/// GTSAM-style print
|
||||||
void print(const std::string& s = "Discrete Conditional: ",
|
void print(const std::string& s = "Discrete Conditional: ",
|
||||||
const KeyFormatter& formatter = DefaultKeyFormatter) const;
|
const KeyFormatter& formatter = DefaultKeyFormatter) const override;
|
||||||
|
|
||||||
/// GTSAM-style equals
|
/// GTSAM-style equals
|
||||||
bool equals(const DiscreteFactor& other, double tol = 1e-9) const;
|
bool equals(const DiscreteFactor& other, double tol = 1e-9) const override;
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
/// @name Standard Interface
|
/// @name Standard Interface
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
|
/// print index signature only
|
||||||
|
void printSignature(
|
||||||
|
const std::string& s = "Discrete Conditional: ",
|
||||||
|
const KeyFormatter& formatter = DefaultKeyFormatter) const {
|
||||||
|
static_cast<const BaseConditional*>(this)->print(s, formatter);
|
||||||
|
}
|
||||||
|
|
||||||
/// Evaluate, just a lookup in the AlgebraicDecisionTree
|
/// Evaluate, just a lookup in the AlgebraicDecisionTree
|
||||||
virtual double operator()(const Values& values) const {
|
double operator()(const Values& values) const override {
|
||||||
return Potentials::operator()(values);
|
return Potentials::operator()(values);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -15,50 +15,52 @@
|
||||||
* @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <gtsam/discrete/Potentials.h>
|
|
||||||
#include <gtsam/discrete/DecisionTree-inl.h>
|
#include <gtsam/discrete/DecisionTree-inl.h>
|
||||||
|
#include <gtsam/discrete/Potentials.h>
|
||||||
|
|
||||||
#include <boost/format.hpp>
|
#include <boost/format.hpp>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
// explicit instantiation
|
// explicit instantiation
|
||||||
template class DecisionTree<Key, double> ;
|
template class DecisionTree<Key, double>;
|
||||||
template class AlgebraicDecisionTree<Key> ;
|
template class AlgebraicDecisionTree<Key>;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Potentials::safe_div(const double& a, const double& b) {
|
double Potentials::safe_div(const double& a, const double& b) {
|
||||||
// cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
|
// cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
|
||||||
// The use for safe_div is when we divide the product factor by the sum factor.
|
// The use for safe_div is when we divide the product factor by the sum
|
||||||
// If the product or sum is zero, we accord zero probability to the event.
|
// factor. If the product or sum is zero, we accord zero probability to the
|
||||||
return (a == 0 || b == 0) ? 0 : (a / b);
|
// event.
|
||||||
}
|
return (a == 0 || b == 0) ? 0 : (a / b);
|
||||||
|
}
|
||||||
|
|
||||||
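safe_div above exists because the product factor is later divided by the sum factor: mapping a zero numerator or zero denominator to zero assigns zero probability to impossible events instead of producing NaN or infinity. Under that same convention, for example:

// safe_div(0.0, 0.0) == 0.0   (impossible event, not the NaN of 0/0)
// safe_div(0.3, 0.6) == 0.5   (ordinary normalization)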
/* ******************************************************************************** */
|
/* ********************************************************************************
|
||||||
Potentials::Potentials() :
|
*/
|
||||||
ADT(1.0) {
|
Potentials::Potentials() : ADT(1.0) {}
|
||||||
}
|
|
||||||
|
|
||||||
/* ******************************************************************************** */
|
/* ********************************************************************************
|
||||||
Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree) :
|
*/
|
||||||
ADT(decisionTree), cardinalities_(keys.cardinalities()) {
|
Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree)
|
||||||
}
|
: ADT(decisionTree), cardinalities_(keys.cardinalities()) {}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
bool Potentials::equals(const Potentials& other, double tol) const {
|
bool Potentials::equals(const Potentials& other, double tol) const {
|
||||||
return ADT::equals(other, tol);
|
return ADT::equals(other, tol);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
void Potentials::print(const string& s,
|
void Potentials::print(const string& s, const KeyFormatter& formatter) const {
|
||||||
const KeyFormatter& formatter) const {
|
cout << s << "\n Cardinalities: {";
|
||||||
cout << s << "\n Cardinalities: ";
|
for (const std::pair<const Key,size_t>& key : cardinalities_)
|
||||||
for(const DiscreteKey& key: cardinalities_)
|
cout << formatter(key.first) << ":" << key.second << ", ";
|
||||||
cout << formatter(key.first) << "=" << formatter(key.second) << " ";
|
cout << "}" << endl;
|
||||||
cout << endl;
|
ADT::print(" ");
|
||||||
ADT::print(" ");
|
}
|
||||||
}
|
|
||||||
//
|
//
|
||||||
// /* ************************************************************************* */
|
// /* ************************************************************************* */
|
||||||
// template<class P>
|
// template<class P>
|
||||||
|
@ -95,4 +97,4 @@ namespace gtsam {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
} // namespace gtsam
|
} // namespace gtsam
|
||||||
|
|
|
@ -122,28 +122,30 @@ namespace gtsam {
|
||||||
key_(key) {
|
key_(key) {
|
||||||
}
|
}
|
||||||
|
|
||||||
DiscreteKeys Signature::discreteKeysParentsFirst() const {
|
DiscreteKeys Signature::discreteKeys() const {
|
||||||
DiscreteKeys keys;
|
DiscreteKeys keys;
|
||||||
for(const DiscreteKey& key: parents_)
|
|
||||||
keys.push_back(key);
|
|
||||||
keys.push_back(key_);
|
keys.push_back(key_);
|
||||||
|
for (const DiscreteKey& key : parents_) keys.push_back(key);
|
||||||
return keys;
|
return keys;
|
||||||
}
|
}
|
||||||
|
|
||||||
KeyVector Signature::indices() const {
|
KeyVector Signature::indices() const {
|
||||||
KeyVector js;
|
KeyVector js;
|
||||||
js.push_back(key_.first);
|
js.push_back(key_.first);
|
||||||
for(const DiscreteKey& key: parents_)
|
for (const DiscreteKey& key : parents_) js.push_back(key.first);
|
||||||
js.push_back(key.first);
|
|
||||||
return js;
|
return js;
|
||||||
}
|
}
|
||||||
|
|
||||||
vector<double> Signature::cpt() const {
|
vector<double> Signature::cpt() const {
|
||||||
vector<double> cpt;
|
vector<double> cpt;
|
||||||
if (table_) {
|
if (table_) {
|
||||||
for(const Row& row: *table_)
|
const size_t nrStates = table_->at(0).size();
|
||||||
for(const double& x: row)
|
for (size_t j = 0; j < nrStates; j++) {
|
||||||
cpt.push_back(x);
|
for (const Row& row : *table_) {
|
||||||
|
assert(row.size() == nrStates);
|
||||||
|
cpt.push_back(row[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return cpt;
|
return cpt;
|
||||||
}
|
}
|
||||||
|
|
|
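The rewritten cpt() transposes the table: the outer loop now runs over states and the inner loop over rows, alongside the new frontal-first key order returned by discreteKeys(). A small self-contained version of the new loop with hypothetical numbers:

#include <cstddef>
#include <vector>

using Row = std::vector<double>;

// Flatten a signature table column by column, as in the new cpt() loop above.
std::vector<double> flatten(const std::vector<Row>& table) {
  std::vector<double> cpt;
  const std::size_t nrStates = table.at(0).size();
  for (std::size_t j = 0; j < nrStates; j++)
    for (const Row& row : table) cpt.push_back(row[j]);
  return cpt;
}

// flatten({{0.4, 0.6}, {0.7, 0.3}}) yields {0.4, 0.7, 0.6, 0.3};
// the previous row-major loop produced {0.4, 0.6, 0.7, 0.3}.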
@ -86,8 +86,8 @@ namespace gtsam {
|
||||||
return parents_;
|
return parents_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** All keys, with variable key last */
|
/** All keys, with variable key first */
|
||||||
DiscreteKeys discreteKeysParentsFirst() const;
|
DiscreteKeys discreteKeys() const;
|
||||||
|
|
||||||
/** All key indices, with variable key first */
|
/** All key indices, with variable key first */
|
||||||
KeyVector indices() const;
|
KeyVector indices() const;
|
||||||
|
|
|
@ -132,7 +132,7 @@ TEST(ADT, example3)
|
||||||
|
|
||||||
/** Convert Signature into CPT */
|
/** Convert Signature into CPT */
|
||||||
ADT create(const Signature& signature) {
|
ADT create(const Signature& signature) {
|
||||||
ADT p(signature.discreteKeysParentsFirst(), signature.cpt());
|
ADT p(signature.discreteKeys(), signature.cpt());
|
||||||
static size_t count = 0;
|
static size_t count = 0;
|
||||||
const DiscreteKey& key = signature.key();
|
const DiscreteKey& key = signature.key();
|
||||||
string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str();
|
string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str();
|
||||||
|
@ -181,19 +181,20 @@ TEST(ADT, joint)
|
||||||
dot(joint, "Asia-ASTLBEX");
|
dot(joint, "Asia-ASTLBEX");
|
||||||
joint = apply(joint, pD, &mul);
|
joint = apply(joint, pD, &mul);
|
||||||
dot(joint, "Asia-ASTLBEXD");
|
dot(joint, "Asia-ASTLBEXD");
|
||||||
EXPECT_LONGS_EQUAL(346, (long)muls);
|
EXPECT_LONGS_EQUAL(346, muls);
|
||||||
gttoc_(asiaJoint);
|
gttoc_(asiaJoint);
|
||||||
tictoc_getNode(asiaJointNode, asiaJoint);
|
tictoc_getNode(asiaJointNode, asiaJoint);
|
||||||
elapsed = asiaJointNode->secs() + asiaJointNode->wall();
|
elapsed = asiaJointNode->secs() + asiaJointNode->wall();
|
||||||
tictoc_reset_();
|
tictoc_reset_();
|
||||||
printCounts("Asia joint");
|
printCounts("Asia joint");
|
||||||
|
|
||||||
|
// Form P(A,S,T,L) = P(A) P(S) P(T|A) P(L|S)
|
||||||
ADT pASTL = pA;
|
ADT pASTL = pA;
|
||||||
pASTL = apply(pASTL, pS, &mul);
|
pASTL = apply(pASTL, pS, &mul);
|
||||||
pASTL = apply(pASTL, pT, &mul);
|
pASTL = apply(pASTL, pT, &mul);
|
||||||
pASTL = apply(pASTL, pL, &mul);
|
pASTL = apply(pASTL, pL, &mul);
|
||||||
|
|
||||||
// test combine
|
// test combine to check that P(A) = \sum_{S,T,L} P(A,S,T,L)
|
||||||
ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_);
|
ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_);
|
||||||
EXPECT(assert_equal(pA, fAa));
|
EXPECT(assert_equal(pA, fAa));
|
||||||
ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_);
|
ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_);
|
||||||
|
|
|
@ -18,110 +18,135 @@
|
||||||
|
|
||||||
#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/base/Testable.h>
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
#include <gtsam/base/debug.h>
|
#include <gtsam/base/debug.h>
|
||||||
|
#include <gtsam/base/Testable.h>
|
||||||
|
#include <gtsam/base/Vector.h>
|
||||||
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
|
|
||||||
#include <boost/assign/std/map.hpp>
|
|
||||||
#include <boost/assign/list_inserter.hpp>
|
#include <boost/assign/list_inserter.hpp>
|
||||||
|
#include <boost/assign/std/map.hpp>
|
||||||
|
|
||||||
using namespace boost::assign;
|
using namespace boost::assign;
|
||||||
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST(DiscreteBayesNet, Asia)
|
TEST(DiscreteBayesNet, bayesNet) {
|
||||||
{
|
DiscreteBayesNet bayesNet;
|
||||||
|
DiscreteKey Parent(0, 2), Child(1, 2);
|
||||||
|
|
||||||
|
auto prior = boost::make_shared<DiscreteConditional>(Parent % "6/4");
|
||||||
|
CHECK(assert_equal(Potentials::ADT({Parent}, "0.6 0.4"),
|
||||||
|
(Potentials::ADT)*prior));
|
||||||
|
bayesNet.push_back(prior);
|
||||||
|
|
||||||
|
auto conditional =
|
||||||
|
boost::make_shared<DiscreteConditional>(Child | Parent = "7/3 8/2");
|
||||||
|
EXPECT_LONGS_EQUAL(1, *(conditional->beginFrontals()));
|
||||||
|
Potentials::ADT expected(Child & Parent, "0.7 0.8 0.3 0.2");
|
||||||
|
CHECK(assert_equal(expected, (Potentials::ADT)*conditional));
|
||||||
|
bayesNet.push_back(conditional);
|
||||||
|
|
||||||
|
DiscreteFactorGraph fg(bayesNet);
|
||||||
|
LONGS_EQUAL(2, fg.back()->size());
|
||||||
|
|
||||||
|
// Check the marginals
|
||||||
|
const double expectedMarginal[2]{0.4, 0.6 * 0.3 + 0.4 * 0.2};
|
||||||
|
DiscreteMarginals marginals(fg);
|
||||||
|
for (size_t j = 0; j < 2; j++) {
|
||||||
|
Vector FT = marginals.marginalProbabilities(DiscreteKey(j, 2));
|
||||||
|
EXPECT_DOUBLES_EQUAL(expectedMarginal[j], FT[1], 1e-3);
|
||||||
|
EXPECT_DOUBLES_EQUAL(FT[0], 1.0 - FT[1], 1e-9);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
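Reading off the expectedMarginal array in the bayesNet test above: the prior 6/4 gives P(Parent = 1) = 0.4, and the conditional 7/3 8/2 gives P(Child = 1 | Parent = 0) = 0.3 and P(Child = 1 | Parent = 1) = 0.2, so the marginal is P(Child = 1) = 0.6 * 0.3 + 0.4 * 0.2 = 0.26, which is exactly what the second entry spells out.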
|
/* ************************************************************************* */
|
||||||
|
TEST(DiscreteBayesNet, Asia) {
|
||||||
DiscreteBayesNet asia;
|
DiscreteBayesNet asia;
|
||||||
// DiscreteKey A("Asia"), S("Smoking"), T("Tuberculosis"), L("LungCancer"), B(
|
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
|
||||||
// "Bronchitis"), E("Either"), X("XRay"), D("Dyspnoea");
|
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||||
DiscreteKey A(0,2), S(4,2), T(3,2), L(6,2), B(7,2), E(5,2), X(2,2), D(1,2);
|
|
||||||
|
|
||||||
// TODO: make a version that doesn't use the parser
|
asia.add(Asia % "99/1");
|
||||||
asia.add(A % "99/1");
|
asia.add(Smoking % "50/50");
|
||||||
asia.add(S % "50/50");
|
|
||||||
|
|
||||||
asia.add(T | A = "99/1 95/5");
|
asia.add(Tuberculosis | Asia = "99/1 95/5");
|
||||||
asia.add(L | S = "99/1 90/10");
|
asia.add(LungCancer | Smoking = "99/1 90/10");
|
||||||
asia.add(B | S = "70/30 40/60");
|
asia.add(Bronchitis | Smoking = "70/30 40/60");
|
||||||
|
|
||||||
asia.add((E | T, L) = "F T T T");
|
asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||||
|
|
||||||
asia.add(X | E = "95/5 2/98");
|
asia.add(XRay | Either = "95/5 2/98");
|
||||||
// next lines are same as asia.add((D | E, B) = "9/1 2/8 3/7 1/9");
|
asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
|
||||||
DiscreteConditional::shared_ptr actual =
|
|
||||||
boost::make_shared<DiscreteConditional>((D | E, B) = "9/1 2/8 3/7 1/9");
|
|
||||||
asia.push_back(actual);
|
|
||||||
// GTSAM_PRINT(asia);
|
|
||||||
|
|
||||||
// Convert to factor graph
|
// Convert to factor graph
|
||||||
DiscreteFactorGraph fg(asia);
|
DiscreteFactorGraph fg(asia);
|
||||||
// GTSAM_PRINT(fg);
|
LONGS_EQUAL(3, fg.back()->size());
|
||||||
LONGS_EQUAL(3,fg.back()->size());
|
|
||||||
Potentials::ADT expected(B & D & E, "0.9 0.3 0.1 0.7 0.2 0.1 0.8 0.9");
|
// Check the marginals we know (of the parent-less nodes)
|
||||||
CHECK(assert_equal(expected,(Potentials::ADT)*actual));
|
DiscreteMarginals marginals(fg);
|
||||||
|
Vector2 va(0.99, 0.01), vs(0.5, 0.5);
|
||||||
|
EXPECT(assert_equal(va, marginals.marginalProbabilities(Asia)));
|
||||||
|
EXPECT(assert_equal(vs, marginals.marginalProbabilities(Smoking)));
|
||||||
|
|
||||||
// Create solver and eliminate
|
// Create solver and eliminate
|
||||||
Ordering ordering;
|
Ordering ordering;
|
||||||
ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7);
|
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
|
||||||
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
|
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
|
||||||
// GTSAM_PRINT(*chordal);
|
DiscreteConditional expected2(Bronchitis % "11/9");
|
||||||
DiscreteConditional expected2(B % "11/9");
|
EXPECT(assert_equal(expected2, *chordal->back()));
|
||||||
CHECK(assert_equal(expected2,*chordal->back()));
|
|
||||||
|
|
||||||
// solve
|
// solve
|
||||||
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
||||||
DiscreteFactor::Values expectedMPE;
|
DiscreteFactor::Values expectedMPE;
|
||||||
insert(expectedMPE)(A.first, 0)(D.first, 0)(X.first, 0)(T.first, 0)(S.first,
|
insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)(
|
||||||
0)(E.first, 0)(L.first, 0)(B.first, 0);
|
Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)(
|
||||||
|
LungCancer.first, 0)(Bronchitis.first, 0);
|
||||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||||
|
|
||||||
// add evidence, we were in Asia and we have Dispnoea
|
// add evidence, we were in Asia and we have dyspnea
|
||||||
fg.add(A, "0 1");
|
fg.add(Asia, "0 1");
|
||||||
fg.add(D, "0 1");
|
fg.add(Dyspnea, "0 1");
|
||||||
// fg.product().dot("fg");
|
|
||||||
|
|
||||||
// solve again, now with evidence
|
// solve again, now with evidence
|
||||||
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
||||||
// GTSAM_PRINT(*chordal2);
|
|
||||||
DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
|
DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
|
||||||
DiscreteFactor::Values expectedMPE2;
|
DiscreteFactor::Values expectedMPE2;
|
||||||
insert(expectedMPE2)(A.first, 1)(D.first, 1)(X.first, 0)(T.first, 0)(S.first,
|
insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)(
|
||||||
1)(E.first, 0)(L.first, 0)(B.first, 1);
|
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)(
|
||||||
|
LungCancer.first, 0)(Bronchitis.first, 1);
|
||||||
EXPECT(assert_equal(expectedMPE2, *actualMPE2));
|
EXPECT(assert_equal(expectedMPE2, *actualMPE2));
|
||||||
|
|
||||||
// now sample from it
|
// now sample from it
|
||||||
DiscreteFactor::Values expectedSample;
|
DiscreteFactor::Values expectedSample;
|
||||||
SETDEBUG("DiscreteConditional::sample", false);
|
SETDEBUG("DiscreteConditional::sample", false);
|
||||||
insert(expectedSample)(A.first, 1)(D.first, 1)(X.first, 1)(T.first, 0)(
|
insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)(
|
||||||
S.first, 1)(E.first, 1)(L.first, 1)(B.first, 0);
|
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)(
|
||||||
|
LungCancer.first, 1)(Bronchitis.first, 0);
|
||||||
DiscreteFactor::sharedValues actualSample = chordal2->sample();
|
DiscreteFactor::sharedValues actualSample = chordal2->sample();
|
||||||
EXPECT(assert_equal(expectedSample, *actualSample));
|
EXPECT(assert_equal(expectedSample, *actualSample));
|
||||||
}
|
}
|
||||||
|
|
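The signature strings used in the Asia test above ("99/1", "95/5 2/98", and so on) give each row of a CPT as unnormalized ratios; each row is divided by its own sum, so "99/1" becomes 0.99 and 0.01. A small sketch of that per-row normalization (illustration only, not the GTSAM signature parser):

#include <vector>

// Normalize one CPT row given as ratios, e.g. {99, 1} -> {0.99, 0.01}.
std::vector<double> normalizeRow(const std::vector<double>& ratios) {
  double sum = 0.0;
  for (double r : ratios) sum += r;
  std::vector<double> row;
  for (double r : ratios) row.push_back(r / sum);
  return row;
}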
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST_UNSAFE(DiscreteBayesNet, Sugar)
|
TEST_UNSAFE(DiscreteBayesNet, Sugar) {
|
||||||
{
|
DiscreteKey T(0, 2), L(1, 2), E(2, 2), C(8, 3), S(7, 2);
|
||||||
DiscreteKey T(0,2), L(1,2), E(2,2), D(3,2), C(8,3), S(7,2);
|
|
||||||
|
|
||||||
DiscreteBayesNet bn;
|
DiscreteBayesNet bn;
|
||||||
|
|
||||||
// test some mistakes
|
|
||||||
// add(bn, D);
|
|
||||||
// add(bn, D | E);
|
|
||||||
// add(bn, D | E = "blah");
|
|
||||||
|
|
||||||
// try logic
|
// try logic
|
||||||
bn.add((E | T, L) = "OR");
|
bn.add((E | T, L) = "OR");
|
||||||
bn.add((E | T, L) = "AND");
|
bn.add((E | T, L) = "AND");
|
||||||
|
|
||||||
// // try multivalued
|
// try multivalued
|
||||||
bn.add(C % "1/1/2");
|
bn.add(C % "1/1/2");
|
||||||
bn.add(C | S = "1/1/2 5/2/3");
|
bn.add(C | S = "1/1/2 5/2/3");
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -130,4 +155,3 @@ int main() {
|
||||||
return TestRegistry::runAllTests(tr);
|
return TestRegistry::runAllTests(tr);
|
||||||
}
|
}
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
|
|
|
@ -1,261 +1,228 @@
|
||||||
///* ----------------------------------------------------------------------------
|
/* ----------------------------------------------------------------------------
|
||||||
//
|
|
||||||
// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||||
// * Atlanta, Georgia 30332-0415
|
* Atlanta, Georgia 30332-0415
|
||||||
// * All Rights Reserved
|
* All Rights Reserved
|
||||||
// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
//
|
|
||||||
// * See LICENSE for the license information
|
* See LICENSE for the license information
|
||||||
//
|
|
||||||
// * -------------------------------------------------------------------------- */
|
* -------------------------------------------------------------------------- */
|
||||||
//
|
|
||||||
///*
|
/*
|
||||||
// * @file testDiscreteBayesTree.cpp
|
* @file testDiscreteBayesTree.cpp
|
||||||
// * @date sept 15, 2012
|
* @date sept 15, 2012
|
||||||
// * @author Frank Dellaert
|
* @author Frank Dellaert
|
||||||
// */
|
*/
|
||||||
//
|
|
||||||
//#include <gtsam/discrete/DiscreteBayesNet.h>
|
#include <gtsam/base/Vector.h>
|
||||||
//#include <gtsam/discrete/DiscreteBayesTree.h>
|
#include <gtsam/discrete/DiscreteBayesNet.h>
|
||||||
//#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteBayesTree.h>
|
||||||
//
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
//#include <boost/assign/std/vector.hpp>
|
#include <gtsam/inference/BayesNet.h>
|
||||||
//using namespace boost::assign;
|
|
||||||
//
|
#include <boost/assign/std/vector.hpp>
|
||||||
|
using namespace boost::assign;
|
||||||
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
//
|
|
||||||
//using namespace std;
|
#include <vector>
|
||||||
//using namespace gtsam;
|
|
||||||
//
|
using namespace std;
|
||||||
//static bool debug = false;
|
using namespace gtsam;
|
||||||
//
|
|
||||||
///**
|
static bool debug = false;
|
||||||
// * Custom clique class to debug shortcuts
|
|
||||||
// */
|
/* ************************************************************************* */
|
||||||
////class Clique: public BayesTreeCliqueBaseOrdered<Clique, DiscreteConditional> {
|
|
||||||
////
|
TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
|
||||||
////protected:
|
const int nrNodes = 15;
|
||||||
////
|
const size_t nrStates = 2;
|
||||||
////public:
|
|
||||||
////
|
// define variables
|
||||||
//// typedef BayesTreeCliqueBaseOrdered<Clique, DiscreteConditional> Base;
|
vector<DiscreteKey> key;
|
||||||
//// typedef boost::shared_ptr<Clique> shared_ptr;
|
for (int i = 0; i < nrNodes; i++) {
|
||||||
////
|
DiscreteKey key_i(i, nrStates);
|
||||||
//// // Constructors
|
key.push_back(key_i);
|
||||||
//// Clique() {
|
}
|
||||||
//// }
|
|
||||||
//// Clique(const DiscreteConditional::shared_ptr& conditional) :
|
// create a thin-tree Bayesnet, a la Jean-Guillaume
|
||||||
//// Base(conditional) {
|
DiscreteBayesNet bayesNet;
|
||||||
//// }
|
bayesNet.add(key[14] % "1/3");
|
||||||
//// Clique(
|
|
||||||
//// const std::pair<DiscreteConditional::shared_ptr,
|
bayesNet.add(key[13] | key[14] = "1/3 3/1");
|
||||||
//// DiscreteConditional::FactorType::shared_ptr>& result) :
|
bayesNet.add(key[12] | key[14] = "3/1 3/1");
|
||||||
//// Base(result) {
|
|
||||||
//// }
|
bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
|
||||||
////
|
bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
|
||||||
//// /// print index signature only
|
bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
|
||||||
//// void printSignature(const std::string& s = "Clique: ",
|
bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");
|
||||||
//// const KeyFormatter& indexFormatter = DefaultKeyFormatter) const {
|
|
||||||
//// ((IndexConditionalOrdered::shared_ptr) conditional_)->print(s, indexFormatter);
|
bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
|
||||||
//// }
|
bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
|
||||||
////
|
bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
|
||||||
//// /// evaluate value of sub-tree
|
bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");
|
||||||
//// double evaluate(const DiscreteConditional::Values & values) {
|
|
||||||
//// double result = (*(this->conditional_))(values);
|
bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
|
||||||
//// // evaluate all children and multiply into result
|
bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
|
||||||
//// for(boost::shared_ptr<Clique> c: children_)
|
bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
|
||||||
//// result *= c->evaluate(values);
|
bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");
|
||||||
//// return result;
|
|
||||||
//// }
|
if (debug) {
|
||||||
////
|
GTSAM_PRINT(bayesNet);
|
||||||
////};
|
bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
||||||
//
|
}
|
||||||
////typedef BayesTreeOrdered<DiscreteConditional, Clique> DiscreteBayesTree;
|
|
||||||
////
|
// create a BayesTree out of a Bayes net
|
||||||
/////* ************************************************************************* */
|
auto bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal();
|
||||||
////double evaluate(const DiscreteBayesTree& tree,
|
if (debug) {
|
||||||
//// const DiscreteConditional::Values & values) {
|
GTSAM_PRINT(*bayesTree);
|
||||||
//// return tree.root()->evaluate(values);
|
bayesTree->saveGraph("/tmp/discreteBayesTree.dot");
|
||||||
////}
|
}
|
||||||
//
|
|
||||||
///* ************************************************************************* */
|
// Check frontals and parents
|
||||||
//
|
for (size_t i : {13, 14, 9, 3, 2, 8, 1, 0, 10, 5, 4}) {
|
||||||
//TEST_UNSAFE( DiscreteBayesTree, thinTree ) {
|
auto clique_i = (*bayesTree)[i];
|
||||||
//
|
EXPECT_LONGS_EQUAL(i, *(clique_i->conditional_->beginFrontals()));
|
||||||
// const int nrNodes = 15;
|
}
|
||||||
// const size_t nrStates = 2;
|
|
||||||
//
|
auto R = bayesTree->roots().front();
|
||||||
// // define variables
|
|
||||||
// vector<DiscreteKey> key;
|
// Check whether BN and BT give the same answer on all configurations
|
||||||
// for (int i = 0; i < nrNodes; i++) {
|
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
||||||
// DiscreteKey key_i(i, nrStates);
|
key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] &
|
||||||
// key.push_back(key_i);
|
key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
|
||||||
// }
|
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||||
//
|
DiscreteFactor::Values x = allPosbValues[i];
|
||||||
// // create a thin-tree Bayesnet, a la Jean-Guillaume
|
double expected = bayesNet.evaluate(x);
|
||||||
// DiscreteBayesNet bayesNet;
|
double actual = bayesTree->evaluate(x);
|
||||||
// bayesNet.add(key[14] % "1/3");
|
DOUBLES_EQUAL(expected, actual, 1e-9);
|
||||||
//
|
}
|
||||||
// bayesNet.add(key[13] | key[14] = "1/3 3/1");
|
|
||||||
// bayesNet.add(key[12] | key[14] = "3/1 3/1");
|
// Calculate all marginals and some joints for Values==all1
|
||||||
//
|
Vector marginals = Vector::Zero(15);
|
||||||
// bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
|
double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
|
||||||
// bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
|
joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
|
||||||
// bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
|
joint_4_11 = 0, joint_11_13 = 0, joint_11_13_14 = 0,
|
||||||
// bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");
|
joint_11_12_13_14 = 0, joint_9_11_12_13 = 0, joint_8_11_12_13 = 0;
|
||||||
//
|
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||||
// bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
|
DiscreteFactor::Values x = allPosbValues[i];
|
||||||
// bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
|
double px = bayesTree->evaluate(x);
|
||||||
// bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
|
for (size_t i = 0; i < 15; i++)
|
||||||
// bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");
|
if (x[i]) marginals[i] += px;
|
||||||
//
|
if (x[12] && x[14]) {
|
||||||
// bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
|
joint_12_14 += px;
|
||||||
// bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
|
if (x[9]) joint_9_12_14 += px;
|
||||||
// bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
|
if (x[8]) joint_8_12_14 += px;
|
||||||
// bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");
|
}
|
||||||
//
|
if (x[8] && x[12]) joint_8_12 += px;
|
||||||
//// if (debug) {
|
if (x[2]) {
|
||||||
//// GTSAM_PRINT(bayesNet);
|
if (x[8]) joint82 += px;
|
||||||
//// bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
|
if (x[1]) joint12 += px;
|
||||||
//// }
|
}
|
||||||
//
|
if (x[4]) {
|
||||||
// // create a BayesTree out of a Bayes net
|
if (x[2]) joint24 += px;
|
||||||
// DiscreteBayesTree bayesTree(bayesNet);
|
if (x[5]) joint45 += px;
|
||||||
// if (debug) {
|
if (x[6]) joint46 += px;
|
||||||
// GTSAM_PRINT(bayesTree);
|
if (x[11]) joint_4_11 += px;
|
||||||
// bayesTree.saveGraph("/tmp/discreteBayesTree.dot");
|
}
|
||||||
// }
|
if (x[11] && x[13]) {
|
||||||
//
|
joint_11_13 += px;
|
||||||
// // Check whether BN and BT give the same answer on all configurations
|
if (x[8] && x[12]) joint_8_11_12_13 += px;
|
||||||
// // Also calculate all some marginals
|
if (x[9] && x[12]) joint_9_11_12_13 += px;
|
||||||
// Vector marginals = zero(15);
|
if (x[14]) {
|
||||||
// double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
|
joint_11_13_14 += px;
|
||||||
// joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
|
if (x[12]) {
|
||||||
// joint_4_11 = 0;
|
joint_11_12_13_14 += px;
|
||||||
// vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
}
|
||||||
// key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7]
|
}
|
||||||
// & key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
|
}
|
||||||
// for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
}
|
||||||
// DiscreteFactor::Values x = allPosbValues[i];
|
DiscreteFactor::Values all1 = allPosbValues.back();
|
||||||
// double expected = evaluate(bayesNet, x);
|
|
||||||
// double actual = evaluate(bayesTree, x);
|
// check separator marginal P(S0)
|
||||||
// DOUBLES_EQUAL(expected, actual, 1e-9);
|
auto clique = (*bayesTree)[0];
|
||||||
// // collect marginals
|
DiscreteFactorGraph separatorMarginal0 =
|
||||||
// for (size_t i = 0; i < 15; i++)
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// if (x[i])
|
DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);
|
||||||
// marginals[i] += actual;
|
|
||||||
// // calculate shortcut 8 and 0
|
// check separator marginal P(S9), should be P(14)
|
||||||
// if (x[12] && x[14])
|
clique = (*bayesTree)[9];
|
||||||
// joint_12_14 += actual;
|
DiscreteFactorGraph separatorMarginal9 =
|
||||||
// if (x[9] && x[12] & x[14])
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// joint_9_12_14 += actual;
|
DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);
|
||||||
// if (x[8] && x[12] & x[14])
|
|
||||||
// joint_8_12_14 += actual;
|
// check separator marginal of root, should be empty
|
||||||
// if (x[8] && x[12])
|
clique = (*bayesTree)[11];
|
||||||
// joint_8_12 += actual;
|
DiscreteFactorGraph separatorMarginal11 =
|
||||||
// if (x[8] && x[2])
|
clique->separatorMarginal(EliminateDiscrete);
|
||||||
// joint82 += actual;
|
LONGS_EQUAL(0, separatorMarginal11.size());
|
||||||
// if (x[1] && x[2])
|
|
||||||
// joint12 += actual;
|
// check shortcut P(S9||R) to root
|
||||||
// if (x[2] && x[4])
|
clique = (*bayesTree)[9];
|
||||||
// joint24 += actual;
|
DiscreteBayesNet shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// if (x[4] && x[5])
|
LONGS_EQUAL(1, shortcut.size());
|
||||||
// joint45 += actual;
|
DOUBLES_EQUAL(joint_11_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// if (x[4] && x[6])
|
|
||||||
// joint46 += actual;
|
// check shortcut P(S8||R) to root
|
||||||
// if (x[4] && x[11])
|
clique = (*bayesTree)[8];
|
||||||
// joint_4_11 += actual;
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// }
|
DOUBLES_EQUAL(joint_11_12_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// DiscreteFactor::Values all1 = allPosbValues.back();
|
|
||||||
//
|
// check shortcut P(S2||R) to root
|
||||||
// Clique::shared_ptr R = bayesTree.root();
|
clique = (*bayesTree)[2];
|
||||||
//
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
// // check separator marginal P(S0)
|
DOUBLES_EQUAL(joint_9_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// Clique::shared_ptr c = bayesTree[0];
|
|
||||||
// DiscreteFactorGraph separatorMarginal0 = c->separatorMarginal(R,
|
// check shortcut P(S0||R) to root
|
||||||
// EliminateDiscrete);
|
clique = (*bayesTree)[0];
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);
|
shortcut = clique->shortcut(R, EliminateDiscrete);
|
||||||
//
|
DOUBLES_EQUAL(joint_8_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);
|
||||||
// // check separator marginal P(S9), should be P(14)
|
|
||||||
// c = bayesTree[9];
|
// calculate all shortcuts to root
|
||||||
// DiscreteFactorGraph separatorMarginal9 = c->separatorMarginal(R,
|
DiscreteBayesTree::Nodes cliques = bayesTree->nodes();
|
||||||
// EliminateDiscrete);
|
for (auto clique : cliques) {
|
||||||
// EXPECT_DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);
|
DiscreteBayesNet shortcut = clique.second->shortcut(R, EliminateDiscrete);
|
||||||
//
|
if (debug) {
|
||||||
// // check separator marginal of root, should be empty
|
clique.second->conditional_->printSignature();
|
||||||
// c = bayesTree[11];
|
shortcut.print("shortcut:");
|
||||||
// DiscreteFactorGraph separatorMarginal11 = c->separatorMarginal(R,
|
}
|
||||||
// EliminateDiscrete);
|
}
|
||||||
// EXPECT_LONGS_EQUAL(0, separatorMarginal11.size());
|
|
||||||
//
|
// Check all marginals
|
||||||
// // check shortcut P(S9||R) to root
|
DiscreteFactor::shared_ptr marginalFactor;
|
||||||
// c = bayesTree[9];
|
for (size_t i = 0; i < 15; i++) {
|
||||||
// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete);
|
marginalFactor = bayesTree->marginalFactor(i, EliminateDiscrete);
|
||||||
// EXPECT_LONGS_EQUAL(0, shortcut.size());
|
double actual = (*marginalFactor)(all1);
|
||||||
//
|
DOUBLES_EQUAL(marginals[i], actual, 1e-9);
|
||||||
// // check shortcut P(S8||R) to root
|
}
|
||||||
// c = bayesTree[8];
|
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
DiscreteBayesNet::shared_ptr actualJoint;
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_12_14/marginals[14], evaluate(shortcut,all1),
|
|
||||||
// 1e-9);
|
// Check joint P(8, 2)
|
||||||
//
|
actualJoint = bayesTree->jointBayesNet(8, 2, EliminateDiscrete);
|
||||||
// // check shortcut P(S2||R) to root
|
DOUBLES_EQUAL(joint82, actualJoint->evaluate(all1), 1e-9);
|
||||||
// c = bayesTree[2];
|
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
// Check joint P(1, 2)
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_9_12_14/marginals[14], evaluate(shortcut,all1),
|
actualJoint = bayesTree->jointBayesNet(1, 2, EliminateDiscrete);
|
||||||
// 1e-9);
|
DOUBLES_EQUAL(joint12, actualJoint->evaluate(all1), 1e-9);
|
||||||
//
|
|
||||||
// // check shortcut P(S0||R) to root
|
// Check joint P(2, 4)
|
||||||
// c = bayesTree[0];
|
actualJoint = bayesTree->jointBayesNet(2, 4, EliminateDiscrete);
|
||||||
// shortcut = c->shortcut(R, EliminateDiscrete);
|
DOUBLES_EQUAL(joint24, actualJoint->evaluate(all1), 1e-9);
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_8_12_14/marginals[14], evaluate(shortcut,all1),
|
|
||||||
// 1e-9);
|
// Check joint P(4, 5)
|
||||||
//
|
actualJoint = bayesTree->jointBayesNet(4, 5, EliminateDiscrete);
|
||||||
// // calculate all shortcuts to root
|
DOUBLES_EQUAL(joint45, actualJoint->evaluate(all1), 1e-9);
|
||||||
// DiscreteBayesTree::Nodes cliques = bayesTree.nodes();
|
|
||||||
// for(Clique::shared_ptr c: cliques) {
|
// Check joint P(4, 6)
|
||||||
// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete);
|
actualJoint = bayesTree->jointBayesNet(4, 6, EliminateDiscrete);
|
||||||
// if (debug) {
|
DOUBLES_EQUAL(joint46, actualJoint->evaluate(all1), 1e-9);
|
||||||
// c->printSignature();
|
|
||||||
// shortcut.print("shortcut:");
|
// Check joint P(4, 11)
|
||||||
// }
|
actualJoint = bayesTree->jointBayesNet(4, 11, EliminateDiscrete);
|
||||||
// }
|
DOUBLES_EQUAL(joint_4_11, actualJoint->evaluate(all1), 1e-9);
|
||||||
//
|
}
|
||||||
// // Check all marginals
|
|
||||||
// DiscreteFactor::shared_ptr marginalFactor;
|
|
||||||
// for (size_t i = 0; i < 15; i++) {
|
|
||||||
// marginalFactor = bayesTree.marginalFactor(i, EliminateDiscrete);
|
|
||||||
// double actual = (*marginalFactor)(all1);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(marginals[i], actual, 1e-9);
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// DiscreteBayesNet::shared_ptr actualJoint;
|
|
||||||
//
|
|
||||||
// // Check joint P(8,2) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(8, 2, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint82, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(1,2) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(1, 2, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint12, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(2,4)
|
|
||||||
// actualJoint = bayesTree.jointBayesNet(2, 4, EliminateDiscrete);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(joint24, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,5) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(4, 5, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,6) TODO: not disjoint !
|
|
||||||
//// actualJoint = bayesTree.jointBayesNet(4, 6, EliminateDiscrete);
|
|
||||||
//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
// // Check joint P(4,11)
|
|
||||||
// actualJoint = bayesTree.jointBayesNet(4, 11, EliminateDiscrete);
|
|
||||||
// EXPECT_DOUBLES_EQUAL(joint_4_11, evaluate(*actualJoint,all1), 1e-9);
|
|
||||||
//
|
|
||||||
//}
|
|
||||||
|
|
||||||
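Each shortcut check in the ThinTree test above compares the clique's shortcut density P(S || R), evaluated at the all-ones assignment, against a ratio of the joint probabilities accumulated in the earlier loop; for clique 9, for example, the expected value is joint_11_13_14 / joint_11_13, i.e. P(x14 = 1 | x11 = 1, x13 = 1).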
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
int main() {
|
int main() {
|
||||||
|
@ -263,4 +230,3 @@ int main() {
|
||||||
return TestRegistry::runAllTests(tr);
|
return TestRegistry::runAllTests(tr);
|
||||||
}
|
}
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
|
|
|
@ -16,9 +16,9 @@
|
||||||
* @date Feb 14, 2011
|
* @date Feb 14, 2011
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <boost/make_shared.hpp>
|
|
||||||
#include <boost/assign/std/map.hpp>
|
#include <boost/assign/std/map.hpp>
|
||||||
#include <boost/assign/std/vector.hpp>
|
#include <boost/assign/std/vector.hpp>
|
||||||
|
#include <boost/make_shared.hpp>
|
||||||
using namespace boost::assign;
|
using namespace boost::assign;
|
||||||
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
|
@ -36,6 +36,11 @@ TEST( DiscreteConditional, constructors)
|
||||||
DiscreteConditional::shared_ptr expected1 = //
|
DiscreteConditional::shared_ptr expected1 = //
|
||||||
boost::make_shared<DiscreteConditional>(X | Y = "1/1 2/3 1/4");
|
boost::make_shared<DiscreteConditional>(X | Y = "1/1 2/3 1/4");
|
||||||
EXPECT(expected1);
|
EXPECT(expected1);
|
||||||
|
EXPECT_LONGS_EQUAL(0, *(expected1->beginFrontals()));
|
||||||
|
EXPECT_LONGS_EQUAL(2, *(expected1->beginParents()));
|
||||||
|
EXPECT(expected1->endParents() == expected1->end());
|
||||||
|
EXPECT(expected1->endFrontals() == expected1->beginParents());
|
||||||
|
|
||||||
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
||||||
DiscreteConditional actual1(1, f1);
|
DiscreteConditional actual1(1, f1);
|
||||||
EXPECT(assert_equal(*expected1, actual1, 1e-9));
|
EXPECT(assert_equal(*expected1, actual1, 1e-9));
|
||||||
|
@ -43,71 +48,68 @@ TEST( DiscreteConditional, constructors)
|
||||||
DecisionTreeFactor f2(X & Y & Z,
|
DecisionTreeFactor f2(X & Y & Z,
|
||||||
"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
||||||
DiscreteConditional actual2(1, f2);
|
DiscreteConditional actual2(1, f2);
|
||||||
DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor();
|
EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
|
||||||
// EXPECT(assert_equal(f2, *actual2factor, 1e-9));
|
|
||||||
}
|
}
|
||||||
|
|
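The check against f2 / *f2.sum(1) above reflects how DiscreteConditional(1, f2) is formed from a joint factor: summing out the single frontal variable gives the parents' marginal, and dividing the joint by that marginal yields the conditional, P(X | Y, Z) = P(X, Y, Z) / sum_X P(X, Y, Z).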
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST( DiscreteConditional, constructors_alt_interface)
|
TEST(DiscreteConditional, constructors_alt_interface) {
|
||||||
{
|
DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering !
|
||||||
DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering !
|
|
||||||
|
|
||||||
Signature::Table table;
|
Signature::Table table;
|
||||||
Signature::Row r1, r2, r3;
|
Signature::Row r1, r2, r3;
|
||||||
r1 += 1.0, 1.0; r2 += 2.0, 3.0; r3 += 1.0, 4.0;
|
r1 += 1.0, 1.0;
|
||||||
|
r2 += 2.0, 3.0;
|
||||||
|
r3 += 1.0, 4.0;
|
||||||
table += r1, r2, r3;
|
table += r1, r2, r3;
|
||||||
DiscreteConditional::shared_ptr expected1 = //
|
auto actual1 = boost::make_shared<DiscreteConditional>(X | Y = table);
|
||||||
boost::make_shared<DiscreteConditional>(X | Y = table);
|
EXPECT(actual1);
|
||||||
EXPECT(expected1);
|
|
||||||
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
|
||||||
DiscreteConditional actual1(1, f1);
|
DiscreteConditional expected1(1, f1);
|
||||||
EXPECT(assert_equal(*expected1, actual1, 1e-9));
|
EXPECT(assert_equal(expected1, *actual1, 1e-9));
|
||||||
|
|
||||||
DecisionTreeFactor f2(X & Y & Z,
|
DecisionTreeFactor f2(
|
||||||
"0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
|
||||||
DiscreteConditional actual2(1, f2);
|
DiscreteConditional actual2(1, f2);
|
||||||
DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor();
|
EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
|
||||||
// EXPECT(assert_equal(f2, *actual2factor, 1e-9));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST( DiscreteConditional, constructors2)
|
TEST(DiscreteConditional, constructors2) {
|
||||||
{
|
|
||||||
// Declare keys and ordering
|
// Declare keys and ordering
|
||||||
DiscreteKey C(0,2), B(1,2);
|
DiscreteKey C(0, 2), B(1, 2);
|
||||||
DecisionTreeFactor expected(C & B, "0.8 0.75 0.2 0.25");
|
DecisionTreeFactor actual(C & B, "0.8 0.75 0.2 0.25");
|
||||||
Signature signature((C | B) = "4/1 3/1");
|
Signature signature((C | B) = "4/1 3/1");
|
||||||
DiscreteConditional actual(signature);
|
DiscreteConditional expected(signature);
|
||||||
DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor();
|
DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
|
||||||
EXPECT(assert_equal(expected, *actualFactor));
|
EXPECT(assert_equal(*expectedFactor, actual));
|
||||||
}
|
}
|
||||||
|
|
||||||
 /* ************************************************************************* */
-TEST( DiscreteConditional, constructors3)
-{
+TEST(DiscreteConditional, constructors3) {
   // Declare keys and ordering
-  DiscreteKey C(0,2), B(1,2), A(2,2);
-  DecisionTreeFactor expected(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8");
+  DiscreteKey C(0, 2), B(1, 2), A(2, 2);
+  DecisionTreeFactor actual(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8");
   Signature signature((C | B, A) = "4/1 1/1 1/1 1/4");
-  DiscreteConditional actual(signature);
-  DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor();
-  EXPECT(assert_equal(expected, *actualFactor));
+  DiscreteConditional expected(signature);
+  DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
+  EXPECT(assert_equal(*expectedFactor, actual));
 }
 /* ************************************************************************* */
-TEST( DiscreteConditional, Combine) {
+TEST(DiscreteConditional, Combine) {
   DiscreteKey A(0, 2), B(1, 2);
   vector<DiscreteConditional::shared_ptr> c;
   c.push_back(boost::make_shared<DiscreteConditional>(A | B = "1/2 2/1"));
   c.push_back(boost::make_shared<DiscreteConditional>(B % "1/2"));
   DecisionTreeFactor factor(A & B, "0.111111 0.444444 0.222222 0.222222");
-  DiscreteConditional expected(2, factor);
-  DiscreteConditional::shared_ptr actual = DiscreteConditional::Combine(
-      c.begin(), c.end());
-  EXPECT(assert_equal(expected, *actual,1e-5));
+  DiscreteConditional actual(2, factor);
+  auto expected = DiscreteConditional::Combine(c.begin(), c.end());
+  EXPECT(assert_equal(*expected, actual, 1e-5));
 }
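A quick arithmetic check of the constants in the Combine test above, reading A | B = "1/2 2/1" as P(A=0|B=0) = 1/3, P(A=0|B=1) = 2/3 and B % "1/2" as P(B=0) = 1/3 (which block belongs to which parent value is the parser's convention):

  P(A,B) = P(A \mid B)\,P(B):
  P(A{=}0,B{=}0) = \tfrac13\cdot\tfrac13 = \tfrac19 \approx 0.111111
  P(A{=}0,B{=}1) = \tfrac23\cdot\tfrac23 = \tfrac49 \approx 0.444444
  P(A{=}1,B{=}0) = \tfrac23\cdot\tfrac13 = \tfrac29 \approx 0.222222
  P(A{=}1,B{=}1) = \tfrac13\cdot\tfrac23 = \tfrac29 \approx 0.222222

These four products are exactly the entries of the reference DecisionTreeFactor, up to the table's key ordering.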
 /* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
 /* ************************************************************************* */
@@ -19,6 +19,7 @@
 #include <gtsam/discrete/DiscreteFactorGraph.h>
 #include <gtsam/discrete/DiscreteEliminationTree.h>
 #include <gtsam/discrete/DiscreteBayesTree.h>
+#include <gtsam/inference/BayesNet.h>

 #include <CppUnitLite/TestHarness.h>
@@ -146,8 +146,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) {

 /* ************************************************************************* */
 // Second truss example with non-trivial factors
-TEST_UNSAFE( DiscreteMarginals, truss2 ) {
-
+TEST_UNSAFE(DiscreteMarginals, truss2) {
   const int nrNodes = 5;
   const size_t nrStates = 2;

@@ -160,40 +159,39 @@ TEST_UNSAFE( DiscreteMarginals, truss2 ) {

   // create graph and add three truss potentials
   DiscreteFactorGraph graph;
-  graph.add(key[0] & key[2] & key[4],"1 2 3 4 5 6 7 8");
-  graph.add(key[1] & key[3] & key[4],"1 2 3 4 5 6 7 8");
-  graph.add(key[2] & key[3] & key[4],"1 2 3 4 5 6 7 8");
+  graph.add(key[0] & key[2] & key[4], "1 2 3 4 5 6 7 8");
+  graph.add(key[1] & key[3] & key[4], "1 2 3 4 5 6 7 8");
+  graph.add(key[2] & key[3] & key[4], "1 2 3 4 5 6 7 8");

   // Calculate the marginals by brute force
-  vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
-      key[0] & key[1] & key[2] & key[3] & key[4]);
+  vector<DiscreteFactor::Values> allPosbValues =
+      cartesianProduct(key[0] & key[1] & key[2] & key[3] & key[4]);
   Vector T = Z_5x1, F = Z_5x1;
   for (size_t i = 0; i < allPosbValues.size(); ++i) {
     DiscreteFactor::Values x = allPosbValues[i];
     double px = graph(x);
-    for (size_t j=0;j<5;j++)
-      if (x[j]) T[j]+=px; else F[j]+=px;
-    // cout << x[0] << " " << x[1] << " "<< x[2] << " " << x[3] << " " << x[4] << " :\t" << px << endl;
+    for (size_t j = 0; j < 5; j++)
+      if (x[j])
+        T[j] += px;
+      else
+        F[j] += px;
   }

   // Check all marginals given by a sequential solver and Marginals
   //  DiscreteSequentialSolver solver(graph);
   DiscreteMarginals marginals(graph);
-  for (size_t j=0;j<5;j++) {
-    double sum = T[j]+F[j];
-    T[j]/=sum;
-    F[j]/=sum;
+  for (size_t j = 0; j < 5; j++) {
+    double sum = T[j] + F[j];
+    T[j] /= sum;
+    F[j] /= sum;

-    //    // solver
-    //    Vector actualV = solver.marginalProbabilities(key[j]);
-    //    EXPECT(assert_equal((Vector(2) << F[j], T[j]), actualV));

     // Marginals
     vector<double> table;
-    table += F[j],T[j];
-    DecisionTreeFactor expectedM(key[j],table);
+    table += F[j], T[j];
+    DecisionTreeFactor expectedM(key[j], table);
     DiscreteFactor::shared_ptr actualM = marginals(j);
-    EXPECT(assert_equal(expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
+    EXPECT(assert_equal(
+        expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
   }
 }
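For readers following the truss tests, a minimal sketch of the query pattern being exercised; the graph and keys here are illustrative, only the API shown in the hunk above is assumed:

// Sketch: query a single-variable marginal from a small discrete graph.
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <boost/shared_ptr.hpp>

using namespace gtsam;

int main() {
  DiscreteKey A(0, 2), B(1, 2);        // two binary variables
  DiscreteFactorGraph graph;
  graph.add(A & B, "1 2 3 4");         // unnormalized potential on (A, B)

  DiscreteMarginals marginals(graph);  // eliminate once, answer many queries
  DiscreteFactor::shared_ptr marginalA = marginals(0);  // marginal on key 0 (= A)
  auto asFactor = boost::dynamic_pointer_cast<DecisionTreeFactor>(marginalA);
  asFactor->print("P(A) =");
  return 0;
}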
@@ -11,36 +11,43 @@

 /**
  * @file testSignature
- * @brief Tests focusing on the details of Signatures to evaluate boost compliance
+ * @brief Tests focusing on the details of Signatures to evaluate boost
+ * compliance
  * @author Alex Cunningham
  * @date Sept 19th 2011
  */

-#include <boost/assign/std/vector.hpp>
 #include <CppUnitLite/TestHarness.h>

 #include <gtsam/base/Testable.h>
 #include <gtsam/discrete/Signature.h>

+#include <boost/assign/std/vector.hpp>
+
+#include <vector>

 using namespace std;
 using namespace gtsam;
 using namespace boost::assign;

-DiscreteKey X(0,2), Y(1,3), Z(2,2);
+DiscreteKey X(0, 2), Y(1, 3), Z(2, 2);

 /* ************************************************************************* */
 TEST(testSignature, simple_conditional) {
   Signature sig(X | Y = "1/1 2/3 1/4");
+  Signature::Table table = *sig.table();
+  vector<double> row[3]{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}};
+  CHECK(row[0] == table[0]);
+  CHECK(row[1] == table[1]);
+  CHECK(row[2] == table[2]);
   DiscreteKey actKey = sig.key();
-  LONGS_EQUAL((long)X.first, (long)actKey.first);
+  LONGS_EQUAL(X.first, actKey.first);

-  DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-  LONGS_EQUAL(2, (long)actKeys.size());
-  LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
-  LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
+  DiscreteKeys actKeys = sig.discreteKeys();
+  LONGS_EQUAL(2, actKeys.size());
+  LONGS_EQUAL(X.first, actKeys.front().first);
+  LONGS_EQUAL(Y.first, actKeys.back().first);

   vector<double> actCpt = sig.cpt();
-  EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
+  EXPECT_LONGS_EQUAL(6, actCpt.size());
 }

 /* ************************************************************************* */
@@ -54,17 +61,20 @@ TEST(testSignature, simple_conditional_nonparser) {

   Signature sig(X | Y = table);
   DiscreteKey actKey = sig.key();
-  EXPECT_LONGS_EQUAL((long)X.first, (long)actKey.first);
+  EXPECT_LONGS_EQUAL(X.first, actKey.first);

-  DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-  LONGS_EQUAL(2, (long)actKeys.size());
-  LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
-  LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
+  DiscreteKeys actKeys = sig.discreteKeys();
+  LONGS_EQUAL(2, actKeys.size());
+  LONGS_EQUAL(X.first, actKeys.front().first);
+  LONGS_EQUAL(Y.first, actKeys.back().first);

   vector<double> actCpt = sig.cpt();
-  EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
+  EXPECT_LONGS_EQUAL(6, actCpt.size());
 }

 /* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
 /* ************************************************************************* */
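The new CHECKs in simple_conditional rely on the Signature parser normalizing each "a/b" block into a CPT row. A short sketch of what the DSL expands to, using only the header exercised above:

// Sketch: how a Signature spec string maps to normalized CPT rows.
#include <gtsam/discrete/Signature.h>
#include <iostream>

using namespace gtsam;

int main() {
  DiscreteKey X(0, 2), Y(1, 3);
  // One block per value of the parent Y; each block is normalized,
  // e.g. "2/3" becomes the row {0.4, 0.6}.
  Signature sig(X | Y = "1/1 2/3 1/4");
  for (const auto& row : *sig.table()) {
    for (double p : row) std::cout << p << " ";
    std::cout << "\n";  // expect rows 0.5 0.5 / 0.4 0.6 / 0.2 0.8
  }
  return 0;
}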
@@ -60,7 +60,7 @@ public:
   /// @{

   /// print with optional string
-  virtual void print(const std::string& s = "") const ;
+  void print(const std::string& s = "") const override;

   /// assert equality up to a tolerance
   bool equals(const Cal3DS2& K, double tol = 10e-9) const;

@@ -86,7 +86,7 @@ public:
   /// @{

   /// @return a deep copy of this object
-  virtual boost::shared_ptr<Base> clone() const {
+  boost::shared_ptr<Base> clone() const override {
     return boost::shared_ptr<Base>(new Cal3DS2(*this));
   }

@@ -69,7 +69,7 @@ public:
   /// @{

   /// print with optional string
-  virtual void print(const std::string& s = "") const ;
+  virtual void print(const std::string& s = "") const;

   /// assert equality up to a tolerance
   bool equals(const Cal3DS2_Base& K, double tol = 10e-9) const;

@@ -75,7 +75,7 @@ public:
   /// @{

   /// print with optional string
-  virtual void print(const std::string& s = "") const ;
+  void print(const std::string& s = "") const override;

   /// assert equality up to a tolerance
   bool equals(const Cal3Unified& K, double tol = 10e-9) const;

@@ -175,7 +175,7 @@ public:
   }

   /// return calibration
-  const Calibration& calibration() const {
+  const Calibration& calibration() const override {
     return K_;
   }

@@ -107,9 +107,9 @@ public:

     // If needed, apply chain rule
     if (Dpose)
       *Dpose = Dpi_pn * *Dpose;
     if (Dpoint)
       *Dpoint = Dpi_pn * *Dpoint;

     return pi;
   }

@@ -361,7 +361,7 @@ public:
   }

   /// return calibration
-  virtual const CALIBRATION& calibration() const {
+  const CALIBRATION& calibration() const override {
     return *K_;
   }

@@ -45,7 +45,7 @@ public:
   /// @{

   /// print
-  virtual void print(const std::string& s = "") const {
+  void print(const std::string& s = "") const override {
     Base::print(s);
   }
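Most of the header changes above replace `virtual` on overriding members with `override`. A minimal, self-contained illustration of why this matters; the class names here are invented for the example and are not part of GTSAM:

#include <iostream>
#include <string>

struct Base {
  virtual void print(const std::string& s = "") const { std::cout << s << "base\n"; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // `override` turns a silent mismatch (for example a forgotten const)
  // into a compile error instead of quietly declaring a new virtual.
  void print(const std::string& s = "") const override { std::cout << s << "derived\n"; }
};

int main() {
  Derived d;
  const Base& b = d;
  b.print("who am I? ");  // dispatches to Derived::print
  return 0;
}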
@@ -19,8 +19,10 @@
 #include <gtsam/geometry/concepts.h>
 #include <gtsam/base/concepts.h>

-#include <iostream>
 #include <cmath>
+#include <iostream>
+#include <limits>
+#include <string>

 using namespace std;

@@ -36,10 +38,10 @@ Pose3::Pose3(const Pose2& pose2) :
 }

 /* ************************************************************************* */
-Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> H1,
-    OptionalJacobian<6, 3> H2) {
-  if (H1) *H1 << I_3x3, Z_3x3;
-  if (H2) *H2 << Z_3x3, R.transpose();
+Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> HR,
+                    OptionalJacobian<6, 3> Ht) {
+  if (HR) *HR << I_3x3, Z_3x3;
+  if (Ht) *Ht << Z_3x3, R.transpose();
   return Pose3(R, t);
 }
@@ -72,15 +74,15 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {

 /* ************************************************************************* */
 Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
-    OptionalJacobian<6,6> H) {
-  if (H) {
-    H->setZero();
+                       OptionalJacobian<6, 6> Hxi) {
+  if (Hxi) {
+    Hxi->setZero();
     for (int i = 0; i < 6; ++i) {
       Vector6 dxi;
       dxi.setZero();
       dxi(i) = 1.0;
       Matrix6 Gi = adjointMap(dxi);
-      H->col(i) = Gi * y;
+      Hxi->col(i) = Gi * y;
     }
   }
   return adjointMap(xi) * y;

@@ -88,15 +90,15 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,

 /* ************************************************************************* */
 Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
-    OptionalJacobian<6,6> H) {
-  if (H) {
-    H->setZero();
+                                OptionalJacobian<6, 6> Hxi) {
+  if (Hxi) {
+    Hxi->setZero();
     for (int i = 0; i < 6; ++i) {
       Vector6 dxi;
       dxi.setZero();
       dxi(i) = 1.0;
       Matrix6 GTi = adjointMap(dxi).transpose();
-      H->col(i) = GTi * y;
+      Hxi->col(i) = GTi * y;
     }
   }
   return adjointMap(xi).transpose() * y;
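The Jacobian filled above is with respect to xi only; because adjointMap(xi) is linear in xi, column i is simply ad(e_i) * y, which is what the loop over basis vectors computes. A sketch, in the style of the existing Pose3 tests, of checking the analytic Hxi against a numerical derivative (the wrapper name is illustrative):

// Sketch: verify Hxi of Pose3::adjoint with numericalDerivative21.
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/numericalDerivative.h>
#include <iostream>

using namespace gtsam;

// Wrapper without the optional Jacobian, for numerical differentiation.
Vector6 adjoint_(const Vector6& xi, const Vector6& y) {
  return Pose3::adjoint(xi, y);
}

int main() {
  Vector6 xi, y;
  xi << 0.1, -0.2, 0.3, 1.0, 2.0, 3.0;
  y << 0.4, 0.5, -0.6, -1.0, 0.5, 2.0;

  Matrix6 Hxi;
  Pose3::adjoint(xi, y, Hxi);  // analytic Jacobian wrpt xi

  Matrix numerical =
      numericalDerivative21<Vector6, Vector6, Vector6>(adjoint_, xi, y);
  std::cout << "max error: "
            << (Matrix(Hxi) - numerical).cwiseAbs().maxCoeff() << "\n";
  return 0;
}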
@@ -116,8 +118,8 @@ bool Pose3::equals(const Pose3& pose, double tol) const {

 /* ************************************************************************* */
 /** Modified from Murray94book version (which assumes w and v normalized?) */
-Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
-  if (H) *H = ExpmapDerivative(xi);
+Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) {
+  if (Hxi) *Hxi = ExpmapDerivative(xi);

   // get angular velocity omega and translational velocity v from twist xi
   Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));

@@ -125,8 +127,8 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
   Rot3 R = Rot3::Expmap(omega);
   double theta2 = omega.dot(omega);
   if (theta2 > std::numeric_limits<double>::epsilon()) {
     Vector3 t_parallel = omega * omega.dot(v);  // translation parallel to axis
     Vector3 omega_cross_v = omega.cross(v);     // points towards axis
     Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2;
     return Pose3(R, t);
   } else {

@@ -135,10 +137,10 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
 }

 /* ************************************************************************* */
-Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
-  if (H) *H = LogmapDerivative(p);
-  const Vector3 w = Rot3::Logmap(p.rotation());
-  const Vector3 T = p.translation();
+Vector6 Pose3::Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose) {
+  if (Hpose) *Hpose = LogmapDerivative(pose);
+  const Vector3 w = Rot3::Logmap(pose.rotation());
+  const Vector3 T = pose.translation();
   const double t = w.norm();
   if (t < 1e-10) {
     Vector6 log;
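For the non-degenerate branch of Expmap above, the translation is the closed-form SE(3) exponential written without explicit trigonometric coefficients. With \theta^2 = \omega^\top\omega and R = \mathrm{Exp}(\omega), the code computes

  t = \frac{(\omega \times v) - R\,(\omega \times v) + \omega\,(\omega^\top v)}{\theta^2},

which is algebraically identical to t = V(\theta)\,v with

  V(\theta) = I + \frac{1-\cos\theta}{\theta^2}\,[\omega]_\times + \frac{\theta-\sin\theta}{\theta^3}\,[\omega]_\times^2,

the familiar translation block of \exp\hat{\xi} = \begin{pmatrix} R & t \\ 0 & 1 \end{pmatrix}. When \theta^2 falls below machine epsilon the small-angle branch is taken instead, avoiding the division.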
@@ -158,33 +160,33 @@ Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
 }

 /* ************************************************************************* */
-Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian H) {
+Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian Hxi) {
 #ifdef GTSAM_POSE3_EXPMAP
-  return Expmap(xi, H);
+  return Expmap(xi, Hxi);
 #else
   Matrix3 DR;
-  Rot3 R = Rot3::Retract(xi.head<3>(), H ? &DR : 0);
-  if (H) {
-    *H = I_6x6;
-    H->topLeftCorner<3,3>() = DR;
+  Rot3 R = Rot3::Retract(xi.head<3>(), Hxi ? &DR : 0);
+  if (Hxi) {
+    *Hxi = I_6x6;
+    Hxi->topLeftCorner<3, 3>() = DR;
   }
   return Pose3(R, Point3(xi.tail<3>()));
 #endif
 }

 /* ************************************************************************* */
-Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
+Vector6 Pose3::ChartAtOrigin::Local(const Pose3& pose, ChartJacobian Hpose) {
 #ifdef GTSAM_POSE3_EXPMAP
-  return Logmap(T, H);
+  return Logmap(pose, Hpose);
 #else
   Matrix3 DR;
-  Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
-  if (H) {
-    *H = I_6x6;
-    H->topLeftCorner<3,3>() = DR;
+  Vector3 omega = Rot3::LocalCoordinates(pose.rotation(), Hpose ? &DR : 0);
+  if (Hpose) {
+    *Hpose = I_6x6;
+    Hpose->topLeftCorner<3, 3>() = DR;
   }
   Vector6 xi;
-  xi << omega, T.translation();
+  xi << omega, pose.translation();
   return xi;
 #endif
 }
@@ -261,16 +263,16 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) {
 }

 /* ************************************************************************* */
-const Point3& Pose3::translation(OptionalJacobian<3, 6> H) const {
-  if (H) *H << Z_3x3, rotation().matrix();
+const Point3& Pose3::translation(OptionalJacobian<3, 6> Hself) const {
+  if (Hself) *Hself << Z_3x3, rotation().matrix();
   return t_;
 }

 /* ************************************************************************* */

-const Rot3& Pose3::rotation(OptionalJacobian<3, 6> H) const {
-  if (H) {
-    *H << I_3x3, Z_3x3;
+const Rot3& Pose3::rotation(OptionalJacobian<3, 6> Hself) const {
+  if (Hself) {
+    *Hself << I_3x3, Z_3x3;
   }
   return R_;
 }

@@ -284,9 +286,10 @@ Matrix4 Pose3::matrix() const {
 }

 /* ************************************************************************* */
-Pose3 Pose3::transformPoseFrom(const Pose3& aTb) const {
+Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself,
+                               OptionalJacobian<6, 6> HaTb) const {
   const Pose3& wTa = *this;
-  return wTa * aTb;
+  return wTa.compose(aTb, Hself, HaTb);
 }

 /* ************************************************************************* */
@@ -299,101 +302,101 @@ Pose3 Pose3::transform_to(const Pose3& pose) const {
 #endif

 /* ************************************************************************* */
-Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> H1,
-                             OptionalJacobian<6, 6> H2) const {
-  if (H1) *H1 = -wTb.inverse().AdjointMap() * AdjointMap();
-  if (H2) *H2 = I_6x6;
+Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself,
+                             OptionalJacobian<6, 6> HwTb) const {
+  if (Hself) *Hself = -wTb.inverse().AdjointMap() * AdjointMap();
+  if (HwTb) *HwTb = I_6x6;
   const Pose3& wTa = *this;
   return wTa.inverse() * wTb;
 }

 /* ************************************************************************* */
-Point3 Pose3::transformFrom(const Point3& p, OptionalJacobian<3,6> Dpose,
-    OptionalJacobian<3,3> Dpoint) const {
+Point3 Pose3::transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself,
+                            OptionalJacobian<3, 3> Hpoint) const {
   // Only get matrix once, to avoid multiple allocations,
   // as well as multiple conversions in the Quaternion case
   const Matrix3 R = R_.matrix();
-  if (Dpose) {
-    Dpose->leftCols<3>() = R * skewSymmetric(-p.x(), -p.y(), -p.z());
-    Dpose->rightCols<3>() = R;
+  if (Hself) {
+    Hself->leftCols<3>() = R * skewSymmetric(-point.x(), -point.y(), -point.z());
+    Hself->rightCols<3>() = R;
   }
-  if (Dpoint) {
-    *Dpoint = R;
+  if (Hpoint) {
+    *Hpoint = R;
   }
-  return R_ * p + t_;
+  return R_ * point + t_;
 }

 /* ************************************************************************* */
-Point3 Pose3::transformTo(const Point3& p, OptionalJacobian<3,6> Dpose,
-    OptionalJacobian<3,3> Dpoint) const {
+Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself,
+                          OptionalJacobian<3, 3> Hpoint) const {
   // Only get transpose once, to avoid multiple allocations,
   // as well as multiple conversions in the Quaternion case
   const Matrix3 Rt = R_.transpose();
-  const Point3 q(Rt*(p - t_));
-  if (Dpose) {
+  const Point3 q(Rt*(point - t_));
+  if (Hself) {
     const double wx = q.x(), wy = q.y(), wz = q.z();
-    (*Dpose) <<
+    (*Hself) <<
         0.0, -wz, +wy,-1.0, 0.0, 0.0,
         +wz, 0.0, -wx, 0.0,-1.0, 0.0,
         -wy, +wx, 0.0, 0.0, 0.0,-1.0;
   }
-  if (Dpoint) {
-    *Dpoint = Rt;
+  if (Hpoint) {
+    *Hpoint = Rt;
   }
   return q;
 }

 /* ************************************************************************* */
-double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
-                    OptionalJacobian<1, 3> H2) const {
+double Pose3::range(const Point3& point, OptionalJacobian<1, 6> Hself,
+                    OptionalJacobian<1, 3> Hpoint) const {
   Matrix36 D_local_pose;
   Matrix3 D_local_point;
-  Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
-  if (!H1 && !H2) {
+  Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
+  if (!Hself && !Hpoint) {
     return local.norm();
   } else {
     Matrix13 D_r_local;
     const double r = norm3(local, D_r_local);
-    if (H1) *H1 = D_r_local * D_local_pose;
-    if (H2) *H2 = D_r_local * D_local_point;
+    if (Hself) *Hself = D_r_local * D_local_pose;
+    if (Hpoint) *Hpoint = D_r_local * D_local_point;
     return r;
   }
 }

 /* ************************************************************************* */
-double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
-                    OptionalJacobian<1, 6> H2) const {
+double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> Hself,
+                    OptionalJacobian<1, 6> Hpose) const {
   Matrix13 D_local_point;
-  double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
-  if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
+  double r = range(pose.translation(), Hself, Hpose ? &D_local_point : 0);
+  if (Hpose) *Hpose << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
   return r;
 }

 /* ************************************************************************* */
-Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> H1,
-                     OptionalJacobian<2, 3> H2) const {
+Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> Hself,
+                     OptionalJacobian<2, 3> Hpoint) const {
   Matrix36 D_local_pose;
   Matrix3 D_local_point;
-  Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
-  if (!H1 && !H2) {
+  Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
+  if (!Hself && !Hpoint) {
     return Unit3(local);
   } else {
     Matrix23 D_b_local;
     Unit3 b = Unit3::FromPoint3(local, D_b_local);
-    if (H1) *H1 = D_b_local * D_local_pose;
-    if (H2) *H2 = D_b_local * D_local_point;
+    if (Hself) *Hself = D_b_local * D_local_pose;
+    if (Hpoint) *Hpoint = D_b_local * D_local_point;
     return b;
   }
 }

 /* ************************************************************************* */
-Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> H1,
-                     OptionalJacobian<2, 6> H2) const {
-  if (H2) {
-    H2->setZero();
-    return bearing(pose.translation(), H1, H2.cols<3>(3));
+Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself,
+                     OptionalJacobian<2, 6> Hpose) const {
+  if (Hpose) {
+    Hpose->setZero();
+    return bearing(pose.translation(), Hself, Hpose.cols<3>(3));
   }
-  return bearing(pose.translation(), H1, boost::none);
+  return bearing(pose.translation(), Hself, boost::none);
 }

 /* ************************************************************************* */
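All of the range and bearing overloads above follow the same pattern: compute the point in the local frame together with its Jacobians, then chain-rule through the reduction (norm3 or Unit3::FromPoint3). A sketch, modeled on the existing tests, of checking the chained Jacobians of range numerically (the wrapper name is illustrative):

// Sketch: numerical check of Pose3::range(const Point3&) Jacobians.
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/numericalDerivative.h>

using namespace gtsam;

double range_(const Pose3& pose, const Point3& point) { return pose.range(point); }

int main() {
  Pose3 wTa(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));
  Point3 landmark(4, 5, 6);

  Matrix Hself, Hpoint;               // resized by OptionalJacobian
  wTa.range(landmark, Hself, Hpoint);

  Matrix numHself = numericalDerivative21(range_, wTa, landmark);
  Matrix numHpoint = numericalDerivative22(range_, wTa, landmark);
  assert_equal(numHself, Hself, 1e-5);   // prints a message on mismatch
  assert_equal(numHpoint, Hpoint, 1e-5);
  return 0;
}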
@@ -75,8 +75,8 @@ public:

   /// Named constructor with derivatives
   static Pose3 Create(const Rot3& R, const Point3& t,
-                      OptionalJacobian<6, 3> H1 = boost::none,
-                      OptionalJacobian<6, 3> H2 = boost::none);
+                      OptionalJacobian<6, 3> HR = boost::none,
+                      OptionalJacobian<6, 3> Ht = boost::none);

   /**
    * Create Pose3 by aligning two point pairs

@@ -117,10 +117,10 @@ public:
   /// @{

   /// Exponential map at identity - create a rotation from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$
-  static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> H = boost::none);
+  static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi = boost::none);

   /// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this rotation
-  static Vector6 Logmap(const Pose3& p, OptionalJacobian<6, 6> H = boost::none);
+  static Vector6 Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose = boost::none);

   /**
    * Calculate Adjoint map, transforming a twist in the this pose's (i.e, body) frame to the world spatial frame

@@ -157,7 +157,7 @@ public:
    * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
    */
   static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
-                         OptionalJacobian<6, 6> = boost::none);
+                         OptionalJacobian<6, 6> Hxi = boost::none);

   // temporary fix for wrappers until case issue is resolved
   static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}

@@ -167,7 +167,7 @@ public:
    * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
    */
   static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
-                                  OptionalJacobian<6, 6> H = boost::none);
+                                  OptionalJacobian<6, 6> Hxi = boost::none);

   /// Derivative of Expmap
   static Matrix6 ExpmapDerivative(const Vector6& xi);

@@ -177,8 +177,8 @@ public:

   // Chart at origin, depends on compile-time flag GTSAM_POSE3_EXPMAP
   struct ChartAtOrigin {
-    static Pose3 Retract(const Vector6& v, ChartJacobian H = boost::none);
-    static Vector6 Local(const Pose3& r, ChartJacobian H = boost::none);
+    static Pose3 Retract(const Vector6& xi, ChartJacobian Hxi = boost::none);
+    static Vector6 Local(const Pose3& pose, ChartJacobian Hpose = boost::none);
   };

   using LieGroup<Pose3, 6>::inverse; // version with derivative
@@ -201,38 +201,38 @@ public:

   /**
    * @brief takes point in Pose coordinates and transforms it to world coordinates
-   * @param p point in Pose coordinates
-   * @param Dpose optional 3*6 Jacobian wrpt this pose
-   * @param Dpoint optional 3*3 Jacobian wrpt point
+   * @param point point in Pose coordinates
+   * @param Hself optional 3*6 Jacobian wrpt this pose
+   * @param Hpoint optional 3*3 Jacobian wrpt point
    * @return point in world coordinates
    */
-  Point3 transformFrom(const Point3& p, OptionalJacobian<3, 6> Dpose =
-      boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
+  Point3 transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself =
+      boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;

   /** syntactic sugar for transformFrom */
-  inline Point3 operator*(const Point3& p) const {
-    return transformFrom(p);
+  inline Point3 operator*(const Point3& point) const {
+    return transformFrom(point);
   }

   /**
    * @brief takes point in world coordinates and transforms it to Pose coordinates
-   * @param p point in world coordinates
-   * @param Dpose optional 3*6 Jacobian wrpt this pose
-   * @param Dpoint optional 3*3 Jacobian wrpt point
+   * @param point point in world coordinates
+   * @param Hself optional 3*6 Jacobian wrpt this pose
+   * @param Hpoint optional 3*3 Jacobian wrpt point
    * @return point in Pose coordinates
    */
-  Point3 transformTo(const Point3& p, OptionalJacobian<3, 6> Dpose =
-      boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
+  Point3 transformTo(const Point3& point, OptionalJacobian<3, 6> Hself =
+      boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;

   /// @}
   /// @name Standard Interface
   /// @{

   /// get rotation
-  const Rot3& rotation(OptionalJacobian<3, 6> H = boost::none) const;
+  const Rot3& rotation(OptionalJacobian<3, 6> Hself = boost::none) const;

   /// get translation
-  const Point3& translation(OptionalJacobian<3, 6> H = boost::none) const;
+  const Point3& translation(OptionalJacobian<3, 6> Hself = boost::none) const;

   /// get x
   double x() const {
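The renamed Jacobian arguments make the calling convention easier to read at the call site. A small usage sketch of the two point transforms; the pose and point values are illustrative:

// Sketch: map a point between world and body frames, optionally grabbing Jacobians.
#include <gtsam/geometry/Pose3.h>
#include <iostream>

using namespace gtsam;

int main() {
  Pose3 wTb(Rot3::Ypr(0.3, 0.0, 0.0), Point3(1.0, 2.0, 0.0));  // body in world
  Point3 pBody(0.5, 0.0, 0.0);                                 // point in body frame

  // Body -> world (operator* is sugar for transformFrom).
  Point3 pWorld = wTb * pBody;

  // World -> body, with Jacobians wrpt the pose and the point.
  Matrix36 Hself;
  Matrix3 Hpoint;
  Point3 backInBody = wTb.transformTo(pWorld, Hself, Hpoint);

  std::cout << "round-trip error: " << (backInBody - pBody).norm() << "\n";
  return 0;
}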
@@ -252,36 +252,44 @@ public:
   /** convert to 4*4 matrix */
   Matrix4 matrix() const;

-  /** receives a pose in local coordinates and transforms it to world coordinates */
-  Pose3 transformPoseFrom(const Pose3& pose) const;
+  /**
+   *  Assuming self == wTa, takes a pose aTb in local coordinates
+   * and transforms it to world coordinates wTb = wTa * aTb.
+   * This is identical to compose.
+   */
+  Pose3 transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself = boost::none,
+                          OptionalJacobian<6, 6> HaTb = boost::none) const;

-  /** receives a pose in world coordinates and transforms it to local coordinates */
-  Pose3 transformPoseTo(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none,
-                        OptionalJacobian<6, 6> H2 = boost::none) const;
+  /**
+   *  Assuming self == wTa, takes a pose wTb in world coordinates
+   * and transforms it to local coordinates aTb = inv(wTa) * wTb
+   */
+  Pose3 transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself = boost::none,
+                        OptionalJacobian<6, 6> HwTb = boost::none) const;

   /**
    * Calculate range to a landmark
    * @param point 3D location of landmark
    * @return range (double)
    */
-  double range(const Point3& point, OptionalJacobian<1, 6> H1 = boost::none,
-               OptionalJacobian<1, 3> H2 = boost::none) const;
+  double range(const Point3& point, OptionalJacobian<1, 6> Hself = boost::none,
+               OptionalJacobian<1, 3> Hpoint = boost::none) const;

   /**
    * Calculate range to another pose
    * @param pose Other SO(3) pose
    * @return range (double)
    */
-  double range(const Pose3& pose, OptionalJacobian<1, 6> H1 = boost::none,
-               OptionalJacobian<1, 6> H2 = boost::none) const;
+  double range(const Pose3& pose, OptionalJacobian<1, 6> Hself = boost::none,
+               OptionalJacobian<1, 6> Hpose = boost::none) const;

   /**
    * Calculate bearing to a landmark
    * @param point 3D location of landmark
    * @return bearing (Unit3)
    */
-  Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> H1 = boost::none,
-                OptionalJacobian<2, 3> H2 = boost::none) const;
+  Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> Hself = boost::none,
+                OptionalJacobian<2, 3> Hpoint = boost::none) const;

   /**
    * Calculate bearing to another pose

@@ -289,8 +297,8 @@ public:
    * information is ignored.
    * @return bearing (Unit3)
    */
-  Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> H1 = boost::none,
-                OptionalJacobian<2, 6> H2 = boost::none) const;
+  Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself = boost::none,
+                OptionalJacobian<2, 6> Hpose = boost::none) const;

   /// @}
   /// @name Advanced Interface
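The new doc comments pin down the frame convention for the pose transforms. A usage sketch following that naming (the poses here are illustrative):

// Sketch: compose and de-compose poses using the wTa / aTb convention.
#include <gtsam/geometry/Pose3.h>
#include <cmath>
#include <iostream>

using namespace gtsam;

int main() {
  Pose3 wTa(Rot3::Rz(M_PI_2), Point3(1, 0, 0));  // frame a expressed in world
  Pose3 aTb(Rot3(), Point3(0, 1, 0));            // frame b expressed in frame a

  // World pose of b: wTb = wTa * aTb (identical to compose).
  Pose3 wTb = wTa.transformPoseFrom(aTb);

  // Going back: aTb = inv(wTa) * wTb.
  Pose3 aTb2 = wTa.transformPoseTo(wTb);

  std::cout << "recovered aTb equals original: " << aTb.equals(aTb2, 1e-9) << "\n";
  return 0;
}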
@@ -321,20 +329,20 @@ public:
 #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
   /// @name Deprecated
   /// @{
-  Point3 transform_from(const Point3& p,
-      OptionalJacobian<3, 6> Dpose = boost::none,
-      OptionalJacobian<3, 3> Dpoint = boost::none) const {
-    return transformFrom(p, Dpose, Dpoint);
+  Point3 transform_from(const Point3& point,
+      OptionalJacobian<3, 6> Hself = boost::none,
+      OptionalJacobian<3, 3> Hpoint = boost::none) const {
+    return transformFrom(point, Hself, Hpoint);
   }
-  Point3 transform_to(const Point3& p,
-      OptionalJacobian<3, 6> Dpose = boost::none,
-      OptionalJacobian<3, 3> Dpoint = boost::none) const {
-    return transformTo(p, Dpose, Dpoint);
+  Point3 transform_to(const Point3& point,
+      OptionalJacobian<3, 6> Hself = boost::none,
+      OptionalJacobian<3, 3> Hpoint = boost::none) const {
+    return transformTo(point, Hself, Hpoint);
   }
   Pose3 transform_pose_to(const Pose3& pose,
-      OptionalJacobian<6, 6> H1 = boost::none,
-      OptionalJacobian<6, 6> H2 = boost::none) const {
-    return transformPoseTo(pose, H1, H2);
+      OptionalJacobian<6, 6> Hself = boost::none,
+      OptionalJacobian<6, 6> Hpose = boost::none) const {
+    return transformPoseTo(pose, Hself, Hpose);
   }
   /**
    * @deprecated: this function is neither here not there. */
@@ -35,7 +35,7 @@ namespace gtsam {

 // TODO(frank): is this better than SOn::Random?
 // /* *************************************************************************
-// */ static Vector3 randomOmega(boost::mt19937 &rng) {
+// */ static Vector3 randomOmega(std::mt19937 &rng) {
 //   static std::uniform_real_distribution<double> randomAngle(-M_PI, M_PI);
 //   return Unit3::Random(rng).unitVector() * randomAngle(rng);
 // }

@@ -43,7 +43,7 @@ namespace gtsam {
 // /* *************************************************************************
 // */
 // // Create random SO(4) element using direct product of lie algebras.
-// SO4 SO4::Random(boost::mt19937 &rng) {
+// SO4 SO4::Random(std::mt19937 &rng) {
 //   Vector6 delta;
 //   delta << randomOmega(rng), randomOmega(rng);
 //   return SO4::Expmap(delta);

@@ -34,7 +34,7 @@ namespace gtsam {
 using SO4 = SO<4>;

 // /// Random SO(4) element (no big claims about uniformity)
-// static SO4 Random(boost::mt19937 &rng);
+// static SO4 Random(std::mt19937 &rng);

 // Below are all declarations of SO<4> specializations.
 // They are *defined* in SO4.cpp.
@@ -292,6 +292,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
       boost::none) const;
   /// @}

+  template <class Archive>
+  friend void save(Archive&, SO&, const unsigned int);
+  template <class Archive>
+  friend void load(Archive&, SO&, const unsigned int);
   template <class Archive>
   friend void serialize(Archive&, SO&, const unsigned int);
   friend class boost::serialization::access;

@@ -329,6 +333,16 @@ template <>
 SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
                                            DynamicJacobian H2) const;

+/** Serialization function */
+template<class Archive>
+void serialize(
+    Archive& ar, SOn& Q,
+    const unsigned int file_version
+) {
+  Matrix& M = Q.matrix_;
+  ar& BOOST_SERIALIZATION_NVP(M);
+}
+
 /*
  * Define the traits. internal::LieGroup provides both Lie group and Testable
  */
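With the free serialize function above, an SOn value can round-trip through the usual Boost archives. A hedged sketch; it assumes the dynamic-size identity constructor SOn(n) and equals() from SOn.h, and the archive choice is illustrative:

// Sketch: round-trip an SOn through a Boost text archive.
#include <gtsam/geometry/SOn.h>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <sstream>

using namespace gtsam;

int main() {
  SOn Q(5);  // assumption: SOn(n) constructs the n x n identity element

  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << Q;  // dispatches to the serialize() added above
  }

  SOn Q2(5);
  {
    boost::archive::text_iarchive ia(ss);
    ia >> Q2;
  }
  return Q.equals(Q2, 1e-9) ? 0 : 1;
}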
@@ -90,6 +90,8 @@ public:
   /// Copy assignment
   Unit3& operator=(const Unit3 & u) {
     p_ = u.p_;
+    B_ = u.B_;
+    H_B_ = u.H_B_;
     return *this;
   }
@@ -418,6 +418,29 @@ TEST(Pose3, transform_to_rotate) {
   EXPECT(assert_equal(expected, actual, 0.001));
 }

+/* ************************************************************************* */
+// Check transformPoseFrom and its pushforward
+Pose3 transformPoseFrom_(const Pose3& wTa, const Pose3& aTb) {
+  return wTa.transformPoseFrom(aTb);
+}
+
+TEST(Pose3, transformPoseFrom)
+{
+  Matrix actual = (T2*T2).matrix();
+  Matrix expected = T2.matrix()*T2.matrix();
+  EXPECT(assert_equal(actual, expected, 1e-8));
+
+  Matrix H1, H2;
+  T2.transformPoseFrom(T2, H1, H2);
+
+  Matrix numericalH1 = numericalDerivative21(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH1, H1, 5e-3));
+  EXPECT(assert_equal(T2.inverse().AdjointMap(), H1, 5e-3));
+
+  Matrix numericalH2 = numericalDerivative22(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH2, H2, 1e-4));
+}
+
 /* ************************************************************************* */
 TEST(Pose3, transformTo) {
   Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(2, 4, 0));
@@ -484,6 +484,15 @@ TEST(Unit3, ErrorBetweenFactor) {
   }
 }

+TEST(Unit3, CopyAssign) {
+  Unit3 p{1, 0.2, 0.3};
+
+  EXPECT(p.error(p).isZero());
+
+  p = Unit3{-1, 2, 8};
+  EXPECT(p.error(p).isZero());
+}
+
 /* ************************************************************************* */
 TEST(actualH, Serialization) {
   Unit3 p(0, 1, 0);
@@ -70,3 +70,5 @@ namespace gtsam {
   };

 }
+
+#include <gtsam/inference/BayesNet-inst.h>
@@ -299,7 +299,7 @@ namespace gtsam {
       this->keys_.assign(clique->conditional()->beginParents(), clique->conditional()->endParents());
     }

-    void print(const std::string& s="", const KeyFormatter& formatter = DefaultKeyFormatter) const {
+    void print(const std::string& s="", const KeyFormatter& formatter = DefaultKeyFormatter) const override {
       clique->print(s + "stored clique", formatter);
     }
   };
@@ -136,57 +136,61 @@ namespace gtsam {
     }
   }

-  /* ************************************************************************* */
+  /* *********************************************************************** */
   // separator marginal, uses separator marginal of parent recursively
   // P(C) = P(F|S) P(S)
-  /* ************************************************************************* */
-  template<class DERIVED, class FACTORGRAPH>
+  /* *********************************************************************** */
+  template <class DERIVED, class FACTORGRAPH>
   typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(Eliminate function) const
-  {
+  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(
+      Eliminate function) const {
     gttic(BayesTreeCliqueBase_separatorMarginal);
     // Check if the Separator marginal was already calculated
-    if (!cachedSeparatorMarginal_)
-    {
+    if (!cachedSeparatorMarginal_) {
       gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

       // If this is the root, there is no separator
-      if (parent_.expired() /*(if we're the root)*/)
-      {
+      if (parent_.expired() /*(if we're the root)*/) {
         // we are root, return empty
         FactorGraphType empty;
         cachedSeparatorMarginal_ = empty;
-      }
-      else
-      {
+      } else {
+        // Flatten recursion in timing outline
+        gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss);
+        gttoc(BayesTreeCliqueBase_separatorMarginal);
+
         // Obtain P(S) = \int P(Cp) = \int P(Fp|Sp) P(Sp)
         // initialize P(Cp) with the parent separator marginal
         derived_ptr parent(parent_.lock());
-        gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss); // Flatten recursion in timing outline
-        gttoc(BayesTreeCliqueBase_separatorMarginal);
-        FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp)
+        FactorGraphType p_Cp(parent->separatorMarginal(function));  // P(Sp)
+
         gttic(BayesTreeCliqueBase_separatorMarginal);
         gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

         // now add the parent conditional
         p_Cp += parent->conditional_;  // P(Fp|Sp)

         // The variables we want to keepSet are exactly the ones in S
-        KeyVector indicesS(this->conditional()->beginParents(), this->conditional()->endParents());
-        cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+        KeyVector indicesS(this->conditional()->beginParents(),
+                           this->conditional()->endParents());
+        auto separatorMarginal =
+            p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+        cachedSeparatorMarginal_.reset(*separatorMarginal);
       }
     }

     // return the shortcut P(S||B)
     return *cachedSeparatorMarginal_;  // return the cached version
   }

-  /* ************************************************************************* */
-  // marginal2, uses separator marginal of parent recursively
+  /* *********************************************************************** */
+  // marginal2, uses separator marginal of parent
   // P(C) = P(F|S) P(S)
-  /* ************************************************************************* */
-  template<class DERIVED, class FACTORGRAPH>
+  /* *********************************************************************** */
+  template <class DERIVED, class FACTORGRAPH>
   typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(Eliminate function) const
-  {
+  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(
+      Eliminate function) const {
     gttic(BayesTreeCliqueBase_marginal2);
     // initialize with separator marginal P(S)
     FactorGraphType p_C = this->separatorMarginal(function);
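The comment repeated above compresses the central identity of these routines. Written out once, with F the frontal variables of a clique C, S its separator, and subscript p for the parent clique:

  P(C) = P(F \mid S)\,P(S), \qquad
  P(S) = \int P(C_p)\;\mathrm{d}(C_p \setminus S)
       = \int P(F_p \mid S_p)\,P(S_p)\;\mathrm{d}(C_p \setminus S).

The integral is what marginalMultifrontalBayesNet performs on the parent clique's marginal, P(S_p) is obtained by the same rule one level up (the recursive call parent->separatorMarginal(function)), and the result is stored in cachedSeparatorMarginal_ so the elimination is paid for at most once per clique.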
@@ -100,7 +100,7 @@ namespace gtsam {
     bool equals(const DERIVED& other, double tol = 1e-9) const;

     /** print this node */
-    void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
+    virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;

     /// @}
     /// @name Standard Interface

@@ -65,6 +65,8 @@ namespace gtsam {
     Conditional(size_t nrFrontals) : nrFrontals_(nrFrontals) {}

     /// @}
+
+  public:
     /// @name Testable
     /// @{

@@ -76,7 +78,6 @@ namespace gtsam {

     /// @}

-  public:
     /// @name Standard Interface
     /// @{
@ -28,9 +28,9 @@ namespace gtsam {
|
||||||
* with an ordering that does not include all of the variables. */
|
* with an ordering that does not include all of the variables. */
|
||||||
class InconsistentEliminationRequested : public std::exception {
|
class InconsistentEliminationRequested : public std::exception {
|
||||||
public:
|
public:
|
||||||
InconsistentEliminationRequested() throw() {}
|
InconsistentEliminationRequested() noexcept {}
|
||||||
virtual ~InconsistentEliminationRequested() throw() {}
|
virtual ~InconsistentEliminationRequested() noexcept {}
|
||||||
virtual const char* what() const throw() {
|
const char* what() const noexcept override {
|
||||||
return
|
return
|
||||||
"An inference algorithm was called with inconsistent arguments. The\n"
|
"An inference algorithm was called with inconsistent arguments. The\n"
|
||||||
"factor graph, ordering, or variable index were inconsistent with each\n"
|
"factor graph, ordering, or variable index were inconsistent with each\n"
|
||||||
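Not from the diff: dynamic exception specifications like throw() are deprecated since C++11 and removed in later standards, which is what the hunk above migrates away from. A minimal standalone sketch of the same noexcept/override pattern on a hypothetical exception type (ExampleError is illustrative, not a GTSAM class):

    #include <exception>

    // Hypothetical example type showing the pattern used above:
    // noexcept instead of throw(), and an explicit override on what().
    class ExampleError : public std::exception {
     public:
      ExampleError() noexcept {}
      ~ExampleError() noexcept override {}
      // override lets the compiler verify this really overrides
      // std::exception::what(), whose C++11 signature is noexcept.
      const char* what() const noexcept override {
        return "example error message";
      }
    };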
|
|
|
@@ -49,7 +49,7 @@ struct BinaryJacobianFactor: JacobianFactor {

   // Fixed-size matrix update
   void updateHessian(const KeyVector& infoKeys,
-                     SymmetricBlockMatrix* info) const {
+                     SymmetricBlockMatrix* info) const override {
     gttic(updateHessian_BinaryJacobianFactor);
     // Whiten the factor if it has a noise model
     const SharedDiagonal& model = get_model();
@@ -80,7 +80,7 @@ public:


   void print() const { Base::print(); }
-  virtual void print(std::ostream &os) const;
+  void print(std::ostream &os) const override;

   static std::string blasTranslator(const BLASKernel k) ;
   static BLASKernel blasTranslator(const std::string &s) ;
@@ -41,6 +41,7 @@ namespace gtsam {
     typedef boost::shared_ptr<This> shared_ptr;
     typedef boost::weak_ptr<This> weak_ptr;
     GaussianBayesTreeClique() {}
+    virtual ~GaussianBayesTreeClique() {}
     GaussianBayesTreeClique(const boost::shared_ptr<GaussianConditional>& conditional) : Base(conditional) {}
   };

@@ -88,10 +88,10 @@ namespace gtsam {

     /** print */
     void print(const std::string& = "GaussianConditional",
-        const KeyFormatter& formatter = DefaultKeyFormatter) const;
+        const KeyFormatter& formatter = DefaultKeyFormatter) const override;

     /** equals function */
-    bool equals(const GaussianFactor&cg, double tol = 1e-9) const;
+    bool equals(const GaussianFactor&cg, double tol = 1e-9) const override;

     /** Return a view of the upper-triangular R block of the conditional */
     constABlock R() const { return Ab_.range(0, nrFrontals()); }
@@ -57,7 +57,7 @@ namespace gtsam {

     /// print
     void print(const std::string& = "GaussianDensity",
-        const KeyFormatter& formatter = DefaultKeyFormatter) const;
+        const KeyFormatter& formatter = DefaultKeyFormatter) const override;

     /// Mean \f$ \mu = R^{-1} d \f$
     Vector mean() const;
@@ -20,6 +20,7 @@

 #include <gtsam/linear/GaussianBayesTree.h>
 #include <gtsam/inference/ISAM.h>
+#include <gtsam/base/Testable.h>

 namespace gtsam {

@@ -43,4 +44,8 @@ namespace gtsam {

   };

+  /// traits
+  template <>
+  struct traits<GaussianISAM> : public Testable<GaussianISAM> {};
+
 }
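For context (not from the diff): the traits<GaussianISAM> : Testable<GaussianISAM> specialization added above is GTSAM's usual way of opting a type into its Testable concept. A hedged sketch of the same pattern for a hypothetical user type, assuming Testable<T> forwards to print/equals members on the type (MyThing and its members are illustrative only):

    #include <cmath>
    #include <iostream>
    #include <string>

    #include <gtsam/base/Testable.h>

    // Hypothetical user type, not part of GTSAM.
    struct MyThing {
      double value = 0.0;
      // Members assumed to be what Testable<T> forwards to.
      void print(const std::string& s = "") const {
        std::cout << s << "MyThing(" << value << ")" << std::endl;
      }
      bool equals(const MyThing& other, double tol = 1e-9) const {
        return std::fabs(value - other.value) < tol;
      }
    };

    namespace gtsam {
    // Same idiom as the GaussianISAM specialization in the hunk above.
    template <>
    struct traits<MyThing> : public Testable<MyThing> {};
    }  // namespace gtsam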
|
|
|
@@ -515,7 +515,7 @@ Vector JacobianFactor::error_vector(const VectorValues& c) const {
 double JacobianFactor::error(const VectorValues& c) const {
   Vector e = unweighted_error(c);
   // Use the noise model distance function to get the correct error if available.
-  if (model_) return 0.5 * model_->distance(e);
+  if (model_) return 0.5 * model_->squaredMahalanobisDistance(e);
   return 0.5 * e.dot(e);
 }

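Not from the diff: with a Gaussian noise model the quantity computed here is half the squared Mahalanobis distance of the unwhitened error e, i.e. 0.5 * e^T * Sigma^{-1} * e, which reduces to 0.5 * e.dot(e) when no model is attached. A small standalone Eigen sketch of that quantity for a diagonal model (the function name and the sigmas parameter are illustrative, not GTSAM API):

    #include <Eigen/Dense>

    // 0.5 * e^T Sigma^{-1} e for Sigma = diag(sigmas^2): whiten e by dividing
    // element-wise by sigmas, then take half the squared norm.
    double halfSquaredMahalanobis(const Eigen::VectorXd& e,
                                  const Eigen::VectorXd& sigmas) {
      const Eigen::VectorXd whitened = e.cwiseQuotient(sigmas);
      return 0.5 * whitened.squaredNorm();
    }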
|
|
|
@@ -137,12 +137,12 @@ Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
   }
 }

-double Fair::weight(double error) const {
-  return 1.0 / (1.0 + std::abs(error) / c_);
+double Fair::weight(double distance) const {
+  return 1.0 / (1.0 + std::abs(distance) / c_);
 }

-double Fair::residual(double error) const {
-  const double absError = std::abs(error);
+double Fair::loss(double distance) const {
+  const double absError = std::abs(distance);
   const double normalizedError = absError / c_;
   const double c_2 = c_ * c_;
   return c_2 * (normalizedError - std::log1p(normalizedError));
@@ -170,15 +170,15 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
   }
 }

-double Huber::weight(double error) const {
-  const double absError = std::abs(error);
+double Huber::weight(double distance) const {
+  const double absError = std::abs(distance);
   return (absError <= k_) ? (1.0) : (k_ / absError);
 }

-double Huber::residual(double error) const {
-  const double absError = std::abs(error);
+double Huber::loss(double distance) const {
+  const double absError = std::abs(distance);
   if (absError <= k_) { // |x| <= k
-    return error*error / 2;
+    return distance*distance / 2;
   } else { // |x| > k
     return k_ * (absError - (k_/2));
   }
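Not from the diff: across these m-estimator hunks the rename is purely residual -> loss and error -> distance; the usual IRLS identity weight(d) = loss'(d) / d still holds for the formulas shown. A standalone sketch that restates the Huber pair from the hunk above and checks the identity numerically (the free-function names are illustrative, not GTSAM API):

    #include <cassert>
    #include <cmath>
    #include <cstdio>

    // Huber loss and weight with the same formulas as the hunk above,
    // written as free functions with the threshold k passed in.
    double huberLoss(double distance, double k) {
      const double a = std::abs(distance);
      return (a <= k) ? distance * distance / 2 : k * (a - k / 2);
    }

    double huberWeight(double distance, double k) {
      const double a = std::abs(distance);
      return (a <= k) ? 1.0 : k / a;
    }

    int main() {
      const double k = 1.345;  // illustrative tuning constant
      for (double d = 0.1; d < 5.0; d += 0.1) {
        const double h = 1e-6;
        // Central-difference derivative of the loss.
        const double dloss =
            (huberLoss(d + h, k) - huberLoss(d - h, k)) / (2 * h);
        // IRLS identity: weight(d) == loss'(d) / d.
        assert(std::abs(huberWeight(d, k) - dloss / d) < 1e-5);
      }
      std::printf("weight(d) == loss'(d)/d holds on the test grid\n");
      return 0;
    }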
|
@@ -208,12 +208,12 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
   }
 }

-double Cauchy::weight(double error) const {
-  return ksquared_ / (ksquared_ + error*error);
+double Cauchy::weight(double distance) const {
+  return ksquared_ / (ksquared_ + distance*distance);
 }

-double Cauchy::residual(double error) const {
-  const double val = std::log1p(error * error / ksquared_);
+double Cauchy::loss(double distance) const {
+  const double val = std::log1p(distance * distance / ksquared_);
   return ksquared_ * val * 0.5;
 }

@@ -241,18 +241,18 @@ Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), c
   }
 }

-double Tukey::weight(double error) const {
-  if (std::abs(error) <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+double Tukey::weight(double distance) const {
+  if (std::abs(distance) <= c_) {
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     return one_minus_xc2 * one_minus_xc2;
   }
   return 0.0;
 }

-double Tukey::residual(double error) const {
-  double absError = std::abs(error);
+double Tukey::loss(double distance) const {
+  double absError = std::abs(distance);
   if (absError <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2;
     return csquared_ * (1 - t) / 6.0;
   } else {
@@ -280,13 +280,13 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {

 Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}

-double Welsch::weight(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::weight(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return std::exp(-xc2);
 }

-double Welsch::residual(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::loss(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return csquared_ * 0.5 * -std::expm1(-xc2);
 }

@@ -311,16 +311,16 @@ GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
     : Base(reweight), c_(c) {
 }

-double GemanMcClure::weight(double error) const {
+double GemanMcClure::weight(double distance) const {
   const double c2 = c_*c_;
   const double c4 = c2*c2;
-  const double c2error = c2 + error*error;
+  const double c2error = c2 + distance*distance;
   return c4/(c2error*c2error);
 }

-double GemanMcClure::residual(double error) const {
+double GemanMcClure::loss(double distance) const {
   const double c2 = c_*c_;
-  const double error2 = error*error;
+  const double error2 = distance*distance;
   return 0.5 * (c2 * error2) / (c2 + error2);
 }

@@ -345,8 +345,8 @@ DCS::DCS(double c, const ReweightScheme reweight)
     : Base(reweight), c_(c) {
 }

-double DCS::weight(double error) const {
-  const double e2 = error*error;
+double DCS::weight(double distance) const {
+  const double e2 = distance*distance;
   if (e2 > c_)
   {
     const double w = 2.0*c_/(c_ + e2);
@@ -356,10 +356,10 @@ double DCS::weight(double error) const {
   return 1.0;
 }

-double DCS::residual(double error) const {
+double DCS::loss(double distance) const {
   // This is the simplified version of Eq 9 from (Agarwal13icra)
   // after you simplify and cancel terms.
-  const double e2 = error*error;
+  const double e2 = distance*distance;
   const double e4 = e2*e2;
   const double c2 = c_*c_;

@@ -391,17 +391,17 @@ L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
   }
 }

-double L2WithDeadZone::weight(double error) const {
+double L2WithDeadZone::weight(double distance) const {
   // note that this code is slightly uglier than residual, because there are three distinct
   // cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
   // cases (deadzone, non-deadzone) in residual.
-  if (std::abs(error) <= k_) return 0.0;
-  else if (error > k_) return (-k_+error)/error;
-  else return (k_+error)/error;
+  if (std::abs(distance) <= k_) return 0.0;
+  else if (distance > k_) return (-k_+distance)/distance;
+  else return (k_+distance)/distance;
 }

-double L2WithDeadZone::residual(double error) const {
-  const double abs_error = std::abs(error);
+double L2WithDeadZone::loss(double distance) const {
+  const double abs_error = std::abs(distance);
   return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
 }

Some files were not shown because too many files have changed in this diff.