Merge branch 'develop' into feature/rot-print
commit 61b1a9e88d
|
@ -34,7 +34,7 @@ cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
|
||||||
|
|
||||||
make -j$(nproc) install
|
make -j$(nproc) install
|
||||||
|
|
||||||
cd $CURRDIR/../gtsam_install/cython
|
cd cython
|
||||||
|
|
||||||
sudo $PYTHON setup.py install
|
sudo $PYTHON setup.py install
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,7 @@ function configure()
|
||||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
|
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
|
||||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
|
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
|
||||||
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
|
||||||
-DCMAKE_VERBOSE_MAKEFILE=ON
|
-DCMAKE_VERBOSE_MAKEFILE=OFF
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
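The `${VAR:-ON}` syntax in the configure() hunk above lets a caller override each CMake flag from the environment without editing the script. A minimal bash sketch of how that expansion behaves:

```bash
# ${VAR:-ON} falls back to ON only when VAR is unset or empty:
unset GTSAM_BUILD_EXAMPLES_ALWAYS
echo "${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON}"    # prints ON (the default)

export GTSAM_BUILD_EXAMPLES_ALWAYS=OFF
echo "${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON}"    # prints OFF (caller override)
```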
@ -14,7 +14,8 @@ addons:
|
||||||
- clang-9
|
- clang-9
|
||||||
- build-essential pkg-config
|
- build-essential pkg-config
|
||||||
- cmake
|
- cmake
|
||||||
- libpython-dev python-numpy
|
- python3-dev libpython-dev
|
||||||
|
- python3-numpy
|
||||||
- libboost-all-dev
|
- libboost-all-dev
|
||||||
|
|
||||||
# before_install:
|
# before_install:
|
||||||
|
|
|
@ -454,12 +454,11 @@ endif()
|
||||||
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||||
set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
|
set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
|
||||||
# Set up cache options
|
# Set up cache options
|
||||||
set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
|
# Cython install path appended with Build type (e.g. cython, cythonDebug, etc).
|
||||||
if(NOT GTSAM_CYTHON_INSTALL_PATH)
|
# This does not override custom values set from the command line
|
||||||
set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
|
set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython<GTSAM_BUILD_TAG>")
|
||||||
endif()
|
|
||||||
set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
|
set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
|
||||||
add_subdirectory(cython)
|
add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH})
|
||||||
else()
|
else()
|
||||||
set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
|
set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
|
||||||
endif()
|
endif()
|
||||||
|
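Because `GTSAM_CYTHON_INSTALL_PATH` is now a CACHE path seeded from `PROJECT_BINARY_DIR`, and the hunk notes that command-line values are not overridden, a custom destination can still be supplied at configure time. A hedged sketch (the destination path is illustrative only):

```bash
# Override the cached default; the build-type tag is only appended
# when the variable is left at its default value.
cmake .. -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
         -DGTSAM_CYTHON_INSTALL_PATH=$HOME/opt/gtsam_cython
```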
@ -537,54 +536,54 @@ endif()
|
||||||
|
|
||||||
print_build_options_for_target(gtsam)
|
print_build_options_for_target(gtsam)
|
||||||
|
|
||||||
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
|
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
|
||||||
|
|
||||||
if(GTSAM_USE_TBB)
|
if(GTSAM_USE_TBB)
|
||||||
message(STATUS " Use Intel TBB : Yes")
|
message(STATUS " Use Intel TBB : Yes")
|
||||||
elseif(TBB_FOUND)
|
elseif(TBB_FOUND)
|
||||||
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
|
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
|
||||||
else()
|
else()
|
||||||
message(STATUS " Use Intel TBB : TBB not found")
|
message(STATUS " Use Intel TBB : TBB not found")
|
||||||
endif()
|
endif()
|
||||||
if(GTSAM_USE_EIGEN_MKL)
|
if(GTSAM_USE_EIGEN_MKL)
|
||||||
message(STATUS " Eigen will use MKL : Yes")
|
message(STATUS " Eigen will use MKL : Yes")
|
||||||
elseif(MKL_FOUND)
|
elseif(MKL_FOUND)
|
||||||
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
|
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
|
||||||
else()
|
else()
|
||||||
message(STATUS " Eigen will use MKL : MKL not found")
|
message(STATUS " Eigen will use MKL : MKL not found")
|
||||||
endif()
|
endif()
|
||||||
if(GTSAM_USE_EIGEN_MKL_OPENMP)
|
if(GTSAM_USE_EIGEN_MKL_OPENMP)
|
||||||
message(STATUS " Eigen will use MKL and OpenMP : Yes")
|
message(STATUS " Eigen will use MKL and OpenMP : Yes")
|
||||||
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
|
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
|
||||||
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
|
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
|
||||||
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
|
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
|
||||||
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
|
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
|
||||||
elseif(OPENMP_FOUND)
|
elseif(OPENMP_FOUND)
|
||||||
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
|
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
|
||||||
else()
|
else()
|
||||||
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
|
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
|
||||||
endif()
|
endif()
|
||||||
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
|
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
|
||||||
|
|
||||||
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
|
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
|
||||||
message(STATUS " Cheirality exceptions enabled : YES")
|
message(STATUS " Cheirality exceptions enabled : YES")
|
||||||
else()
|
else()
|
||||||
message(STATUS " Cheirality exceptions enabled : NO")
|
message(STATUS " Cheirality exceptions enabled : NO")
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||||
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
|
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
|
||||||
message(STATUS " Build with ccache : Yes")
|
message(STATUS " Build with ccache : Yes")
|
||||||
elseif(CCACHE_FOUND)
|
elseif(CCACHE_FOUND)
|
||||||
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
|
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
|
||||||
else()
|
else()
|
||||||
message(STATUS " Build with ccache : No")
|
message(STATUS " Build with ccache : No")
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
message(STATUS "Packaging flags ")
|
message(STATUS "Packaging flags ")
|
||||||
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
|
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
|
||||||
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
|
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
|
||||||
|
|
||||||
message(STATUS "GTSAM flags ")
|
message(STATUS "GTSAM flags ")
|
||||||
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
|
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
|
||||||
|
@ -595,15 +594,19 @@ print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 al
|
||||||
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
|
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
|
||||||
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
|
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
|
||||||
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
|
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
|
||||||
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
||||||
|
|
||||||
message(STATUS "MATLAB toolbox flags ")
|
message(STATUS "MATLAB toolbox flags ")
|
||||||
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
|
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
|
||||||
|
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
|
||||||
|
message(STATUS " MATLAB root : ${MATLAB_ROOT}")
|
||||||
|
message(STATUS " MEX binary : ${MEX_COMMAND}")
|
||||||
|
endif()
|
||||||
|
|
||||||
message(STATUS "Cython toolbox flags ")
|
message(STATUS "Cython toolbox flags ")
|
||||||
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
|
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
|
||||||
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
|
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||||
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
|
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
|
||||||
endif()
|
endif()
|
||||||
message(STATUS "===============================================================")
|
message(STATUS "===============================================================")
|
||||||
|
|
||||||
|
|
|
@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp
|
||||||
However, we now also need to be able to evaluate the derivatives of compose and inverse.
|
However, we now also need to be able to evaluate the derivatives of compose and inverse.
|
||||||
Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`:
|
Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`:
|
||||||
|
|
||||||
* `r = traits<T>::Compose(p,q,Hq,Hp)`
|
* `r = traits<T>::Compose(p,q,Hp,Hq)`
|
||||||
* `q = traits<T>::Inverse(p,Hp)`
|
* `q = traits<T>::Inverse(p,Hp)`
|
||||||
* `r = traits<T>::Between(p,q,Hq,H2p)`
|
* `r = traits<T>::Between(p,q,Hp,Hq)`
|
||||||
|
|
||||||
where above the *H* arguments stand for optional Jacobian arguments.
|
where above the *H* arguments stand for optional Jacobian arguments.
|
||||||
That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
|
That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
|
||||||
|
|
|
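As a minimal sketch of the traits signatures listed above, assuming `Pose3` purely as an illustrative Lie group type and passing plain dynamic matrices as the optional Jacobians:

```cpp
#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/Matrix.h>

using namespace gtsam;

void composeWithJacobians(const Pose3& p, const Pose3& q) {
  Matrix Hp, Hq;                                   // filled with d(result)/dp and d(result)/dq
  Pose3 r = traits<Pose3>::Compose(p, q, Hp, Hq);  // r = p * q
  Pose3 d = traits<Pose3>::Between(p, q, Hp, Hq);  // d = p^-1 * q
  Matrix Hinv;
  Pose3 pinv = traits<Pose3>::Inverse(p, Hinv);    // pinv = p^-1
  (void)r; (void)d; (void)pinv;
}
```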
@ -1,3 +1,8 @@
|
||||||
|
# Set cmake policy to recognize the AppleClang compiler
|
||||||
|
# independently from the Clang compiler.
|
||||||
|
if(POLICY CMP0025)
|
||||||
|
cmake_policy(SET CMP0025 NEW)
|
||||||
|
endif()
|
||||||
|
|
||||||
# function: list_append_cache(var [new_values ...])
|
# function: list_append_cache(var [new_values ...])
|
||||||
# Like "list(APPEND ...)" but working for CACHE variables.
|
# Like "list(APPEND ...)" but working for CACHE variables.
|
||||||
|
|
|
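A hedged usage sketch of the helper documented above, following its stated signature; the variable name and flags are illustrative only:

```cmake
# Appends to a CACHE list and writes the result back to the cache,
# which a plain list(APPEND ...) would not do.
set(MY_COMPILE_OPTIONS "-Wall" CACHE STRING "Example cached list")
list_append_cache(MY_COMPILE_OPTIONS "-Wextra" "-Wpedantic")
message(STATUS "MY_COMPILE_OPTIONS = ${MY_COMPILE_OPTIONS}")  # -Wall;-Wextra;-Wpedantic
```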
@ -41,9 +41,8 @@ execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||||
function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
|
function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
|
||||||
# Paths for generated files
|
# Paths for generated files
|
||||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
get_filename_component(module_name "${interface_header}" NAME_WE)
|
||||||
set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
|
set(generated_files_path "${install_path}")
|
||||||
wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
|
wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
|
||||||
install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
|
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
function(set_up_required_cython_packages)
|
function(set_up_required_cython_packages)
|
||||||
|
@ -138,6 +137,10 @@ function(cythonize target pyx_file output_lib_we output_dir include_dirs libs in
|
||||||
target_link_libraries(${target} "${libs}")
|
target_link_libraries(${target} "${libs}")
|
||||||
endif()
|
endif()
|
||||||
add_dependencies(${target} ${target}_pyx2cpp)
|
add_dependencies(${target} ${target}_pyx2cpp)
|
||||||
|
|
||||||
|
if(TARGET ${python_install_target})
|
||||||
|
add_dependencies(${python_install_target} ${target})
|
||||||
|
endif()
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
# Internal function that wraps a library and compiles the wrapper
|
# Internal function that wraps a library and compiles the wrapper
|
||||||
|
@ -150,9 +153,12 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
|
||||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
get_filename_component(module_name "${interface_header}" NAME_WE)
|
||||||
|
|
||||||
# Wrap module to Cython pyx
|
# Wrap module to Cython pyx
|
||||||
message(STATUS "Cython wrapper generating ${module_name}.pyx")
|
message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx")
|
||||||
set(generated_pyx "${generated_files_path}/${module_name}.pyx")
|
set(generated_pyx "${generated_files_path}/${module_name}.pyx")
|
||||||
file(MAKE_DIRECTORY "${generated_files_path}")
|
if(NOT EXISTS ${generated_files_path})
|
||||||
|
file(MAKE_DIRECTORY "${generated_files_path}")
|
||||||
|
endif()
|
||||||
|
|
||||||
add_custom_command(
|
add_custom_command(
|
||||||
OUTPUT ${generated_pyx}
|
OUTPUT ${generated_pyx}
|
||||||
DEPENDS ${interface_header} wrap
|
DEPENDS ${interface_header} wrap
|
||||||
|
@ -175,46 +181,6 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
|
||||||
COMMAND cmake -E remove_directory ${generated_files_path})
|
COMMAND cmake -E remove_directory ${generated_files_path})
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
# Internal function that installs a wrap toolbox
|
|
||||||
function(install_cython_wrapped_library interface_header generated_files_path install_path)
|
|
||||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
|
||||||
|
|
||||||
# NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
|
|
||||||
# here prevents creating the top-level module name directory in the destination.
|
|
||||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one
|
|
||||||
get_filename_component(location "${install_path}" PATH)
|
|
||||||
get_filename_component(name "${install_path}" NAME)
|
|
||||||
message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}"
|
|
||||||
|
|
||||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
|
||||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
|
||||||
string(TOUPPER "${build_type}" build_type_upper)
|
|
||||||
if(${build_type_upper} STREQUAL "RELEASE")
|
|
||||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
|
||||||
else()
|
|
||||||
set(build_type_tag "${build_type}")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}"
|
|
||||||
CONFIGURATIONS "${build_type}"
|
|
||||||
PATTERN "build" EXCLUDE
|
|
||||||
PATTERN "CMakeFiles" EXCLUDE
|
|
||||||
PATTERN "Makefile" EXCLUDE
|
|
||||||
PATTERN "*.cmake" EXCLUDE
|
|
||||||
PATTERN "*.cpp" EXCLUDE
|
|
||||||
PATTERN "*.py" EXCLUDE)
|
|
||||||
endforeach()
|
|
||||||
else()
|
|
||||||
install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
|
|
||||||
PATTERN "build" EXCLUDE
|
|
||||||
PATTERN "CMakeFiles" EXCLUDE
|
|
||||||
PATTERN "Makefile" EXCLUDE
|
|
||||||
PATTERN "*.cmake" EXCLUDE
|
|
||||||
PATTERN "*.cpp" EXCLUDE
|
|
||||||
PATTERN "*.py" EXCLUDE)
|
|
||||||
endif()
|
|
||||||
endfunction()
|
|
||||||
|
|
||||||
# Helper function to install Cython scripts and handle multiple build types where the scripts
|
# Helper function to install Cython scripts and handle multiple build types where the scripts
|
||||||
# should be installed to all build type toolboxes
|
# should be installed to all build type toolboxes
|
||||||
#
|
#
|
||||||
|
@ -232,50 +198,7 @@ function(install_cython_scripts source_directory dest_directory patterns)
|
||||||
foreach(pattern ${patterns})
|
foreach(pattern ${patterns})
|
||||||
list(APPEND patterns_args PATTERN "${pattern}")
|
list(APPEND patterns_args PATTERN "${pattern}")
|
||||||
endforeach()
|
endforeach()
|
||||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
|
||||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
file(COPY "${source_directory}" DESTINATION "${dest_directory}"
|
||||||
string(TOUPPER "${build_type}" build_type_upper)
|
|
||||||
if(${build_type_upper} STREQUAL "RELEASE")
|
|
||||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
|
||||||
else()
|
|
||||||
set(build_type_tag "${build_type}")
|
|
||||||
endif()
|
|
||||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
|
||||||
get_filename_component(location "${dest_directory}" PATH)
|
|
||||||
get_filename_component(name "${dest_directory}" NAME)
|
|
||||||
install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
|
|
||||||
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
||||||
endforeach()
|
|
||||||
else()
|
|
||||||
install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
endfunction()
|
endfunction()
|
||||||
|
|
||||||
# Helper function to install specific files and handle multiple build types where the scripts
|
|
||||||
# should be installed to all build type toolboxes
|
|
||||||
#
|
|
||||||
# Arguments:
|
|
||||||
# source_files: The source files to be installed.
|
|
||||||
# dest_directory: The destination directory to install to.
|
|
||||||
function(install_cython_files source_files dest_directory)
|
|
||||||
|
|
||||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
|
||||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
|
||||||
string(TOUPPER "${build_type}" build_type_upper)
|
|
||||||
if(${build_type_upper} STREQUAL "RELEASE")
|
|
||||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
|
||||||
else()
|
|
||||||
set(build_type_tag "${build_type}")
|
|
||||||
endif()
|
|
||||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
|
||||||
get_filename_component(location "${dest_directory}" PATH)
|
|
||||||
get_filename_component(name "${dest_directory}" NAME)
|
|
||||||
install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
|
|
||||||
endforeach()
|
|
||||||
else()
|
|
||||||
install(FILES "${source_files}" DESTINATION "${dest_directory}")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
endfunction()
|
|
||||||
|
|
||||||
|
|
|
@ -3,16 +3,32 @@ include(GtsamCythonWrap)
|
||||||
|
|
||||||
# Create the cython toolbox for the gtsam library
|
# Create the cython toolbox for the gtsam library
|
||||||
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||||
|
# Add the new make target command
|
||||||
|
set(python_install_target python-install)
|
||||||
|
add_custom_target(${python_install_target}
|
||||||
|
COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install
|
||||||
|
WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH})
|
||||||
|
|
||||||
# build and include the bundled gtsam_eigency version of eigency
|
# build and include the bundled gtsam_eigency version of eigency
|
||||||
add_subdirectory(gtsam_eigency)
|
add_subdirectory(gtsam_eigency)
|
||||||
include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
|
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
|
||||||
|
|
||||||
# Fix for error "C1128: number of sections exceeded object file format limit"
|
# Fix for error "C1128: number of sections exceeded object file format limit"
|
||||||
if(MSVC)
|
if(MSVC)
|
||||||
add_compile_options(/bigobj)
|
add_compile_options(/bigobj)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
# wrap gtsam
|
# First set up all the package related files.
|
||||||
|
# This also ensures the below wrap operations work correctly.
|
||||||
|
set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt")
|
||||||
|
|
||||||
|
# Install the custom-generated __init__.py
|
||||||
|
# This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable
|
||||||
|
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY)
|
||||||
|
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY)
|
||||||
|
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py)
|
||||||
|
|
||||||
|
# Wrap gtsam
|
||||||
add_custom_target(gtsam_header DEPENDS "../gtsam.h")
|
add_custom_target(gtsam_header DEPENDS "../gtsam.h")
|
||||||
wrap_and_install_library_cython("../gtsam.h" # interface_header
|
wrap_and_install_library_cython("../gtsam.h" # interface_header
|
||||||
"" # extra imports
|
"" # extra imports
|
||||||
|
@ -20,8 +36,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||||
gtsam # library to link with
|
gtsam # library to link with
|
||||||
"wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
|
"wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
|
||||||
)
|
)
|
||||||
|
add_dependencies(${python_install_target} gtsam gtsam_header)
|
||||||
|
|
||||||
# wrap gtsam_unstable
|
# Wrap gtsam_unstable
|
||||||
if(GTSAM_BUILD_UNSTABLE)
|
if(GTSAM_BUILD_UNSTABLE)
|
||||||
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
|
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
|
||||||
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
|
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
|
||||||
|
@ -30,17 +47,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||||
gtsam_unstable # library to link with
|
gtsam_unstable # library to link with
|
||||||
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
|
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
|
||||||
)
|
)
|
||||||
|
add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
|
|
||||||
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
|
|
||||||
|
|
||||||
# Install the custom-generated __init__.py
|
|
||||||
# This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
|
|
||||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
|
|
||||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
|
|
||||||
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
|
|
||||||
install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
|
|
||||||
# install scripts and tests
|
# install scripts and tests
|
||||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||||
|
|
cython/README.md
|
@ -1,43 +1,51 @@
|
||||||
# Python Wrapper
|
# Python Wrapper
|
||||||
|
|
||||||
This is the Cython/Python wrapper around the GTSAM C++ library.
|
This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- If you want to build the GTSAM python library for a specific python version (e.g. 3.6),
|
||||||
|
use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake`; otherwise the default interpreter will be used.
|
||||||
|
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
|
||||||
|
then the environment should be active while building GTSAM.
|
||||||
|
- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install -r <gtsam_folder>/cython/requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
|
||||||
|
named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy.
|
||||||
|
|
||||||
## Install
|
## Install
|
||||||
|
|
||||||
- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
|
- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `<PROJECT_BINARY_DIR>/cython` in Release mode and `<PROJECT_BINARY_DIR>/cython<CMAKE_BUILD_TYPE>` for other modes.
|
||||||
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
|
|
||||||
- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
|
|
||||||
|
|
||||||
```bash
|
- Build GTSAM and the wrapper with `make`.
|
||||||
pip install -r <gtsam_folder>/cython/requirements.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
|
- To install, simply run `make python-install`.
|
||||||
named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
|
- The same command can be used to install into a virtual environment if it is active.
|
||||||
|
- **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory.
|
||||||
|
|
||||||
- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
|
- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.
|
||||||
The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
|
|
||||||
by default: `<your CMAKE_INSTALL_PREFIX>/cython`
|
|
||||||
|
|
||||||
- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
|
|
||||||
```bash
|
|
||||||
export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
|
|
||||||
```
|
|
||||||
- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
|
|
||||||
- (the same command can be used to install into a virtual environment if it is active)
|
|
||||||
- note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
|
|
||||||
- if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
|
|
||||||
Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
|
|
||||||
|
|
||||||
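Putting the install steps above together, a minimal end-to-end sketch; the build directory, Python version, and install prefix are illustrative and should be adjusted to your checkout:

```bash
mkdir build && cd build
cmake .. -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
         -DGTSAM_PYTHON_VERSION=3.6 \
         -DCMAKE_INSTALL_PREFIX="./install"   # keeps the install out of /usr/local
make -j4                 # builds GTSAM and the Cython wrapper
make python-install      # installs the wrapped module for the active interpreter
```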
## Unit Tests
|
## Unit Tests
|
||||||
|
|
||||||
The Cython toolbox also has a small set of unit tests located in the
|
The Cython toolbox also has a small set of unit tests located in the
|
||||||
test directory. To run them:
|
test directory. To run them:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cd <your GTSAM_CYTHON_INSTALL_PATH>
|
cd <GTSAM_CYTHON_INSTALL_PATH>
|
||||||
python -m unittest discover
|
python -m unittest discover
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Utils
|
||||||
|
|
||||||
|
TODO
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
TODO
|
||||||
|
|
||||||
## Writing Your Own Scripts
|
## Writing Your Own Scripts
|
||||||
|
|
||||||
|
@ -46,79 +54,63 @@ See the tests for examples.
|
||||||
### Some Important Notes:
|
### Some Important Notes:
|
||||||
|
|
||||||
- Vector/Matrix:
|
- Vector/Matrix:
|
||||||
+ GTSAM expects double-precision floating point vectors and matrices.
|
|
||||||
Hence, you should pass numpy matrices with dtype=float, or 'float64'.
|
- GTSAM expects double-precision floating point vectors and matrices.
|
||||||
+ Also, GTSAM expects *column-major* matrices, unlike the default storage
|
Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
|
||||||
scheme in numpy. Hence, you should pass column-major matrices to gtsam using
|
- Also, GTSAM expects _column-major_ matrices, unlike the default storage
|
||||||
|
scheme in numpy. Hence, you should pass column-major matrices to GTSAM using
|
||||||
the flag order='F'. And you always get column-major matrices back.
|
the flag order='F'. And you always get column-major matrices back.
|
||||||
For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
|
For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
|
||||||
+ Passing row-major matrices of different dtype, e.g. 'int', will also work
|
- Passing row-major matrices of different dtype, e.g. `int`, will also work
|
||||||
as the wrapper converts them to column-major and dtype float for you,
|
as the wrapper converts them to column-major and dtype float for you,
|
||||||
using numpy.array.astype(float, order='F', copy=False).
|
using numpy.array.astype(float, order='F', copy=False).
|
||||||
However, this will result in a copy if your matrix is not in the expected type
|
However, this will result in a copy if your matrix is not in the expected type
|
||||||
and storage order.
|
and storage order (see the numpy sketch after these notes).
|
||||||
|
|
||||||
- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>_ in Python.
|
- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>\_ in Python.
|
||||||
Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
|
|
||||||
|
Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`
|
||||||
|
|
||||||
- Casting from a base class to a derived class must be done explicitly.
|
- Casting from a base class to a derived class must be done explicitly.
|
||||||
Examples:
|
|
||||||
```Python
|
|
||||||
noiseBase = factor.noiseModel()
|
|
||||||
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Wrapping Your Own Project That Uses GTSAM
|
Examples:
|
||||||
|
|
||||||
- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
|
```python
|
||||||
+ so that it can find gtsam Cython header: gtsam/gtsam.pxd
|
noiseBase = factor.noiseModel()
|
||||||
|
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
|
||||||
|
```
|
||||||
|
|
||||||
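A short numpy sketch of the storage-order notes above; the values are illustrative, only the dtype and order handling matter:

```python
import numpy as np

# Column-major (Fortran-order) float64: the layout GTSAM expects, no copy needed.
M = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=float, order='F')

# Row-major int data is still accepted, but the wrapper converts it as described
# above, which implies a copy:
N = np.array([[1, 2], [3, 4]])
N_f = N.astype(float, order='F', copy=False)   # what the wrapper effectively does
```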
- In your CMakeList.txt
|
## Wrapping Custom GTSAM-based Project
|
||||||
```cmake
|
|
||||||
find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
|
|
||||||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
|
|
||||||
|
|
||||||
# Wrap
|
Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).
|
||||||
include(GtsamCythonWrap)
|
|
||||||
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
|
|
||||||
wrap_and_install_library_cython("your_project_interface.h"
|
|
||||||
"from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
|
|
||||||
"your_install_path"
|
|
||||||
"libraries_to_link_with_the_cython_module"
|
|
||||||
"dependencies_which_need_to_be_built_before_the_wrapper"
|
|
||||||
)
|
|
||||||
#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
|
|
||||||
```
|
|
||||||
|
|
||||||
## KNOWN ISSUES
|
## KNOWN ISSUES
|
||||||
|
|
||||||
- Doesn't work with python3 installed from homebrew
|
- Doesn't work with python3 installed from homebrew
|
||||||
- size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
|
- size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
|
||||||
- Guess: 64- vs 32-bit? distutils compiler flags?
|
- Guess: 64- vs 32-bit? distutils compiler flags?
|
||||||
- Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be cast to FastVector<Key>
|
- Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be cast to FastVector<Key>
|
||||||
- Upgrading to 0.25 solves the problem
|
- Upgrading to 0.25 solves the problem
|
||||||
- Need default constructor and default copy constructor for almost every class... :(
|
- Need default constructor and default copy constructor for almost every class... :(
|
||||||
- support these constructors by default and declare "delete" for special classes?
|
- support these constructors by default and declare "delete" for special classes?
|
||||||
|
|
||||||
|
|
||||||
### TODO
|
### TODO
|
||||||
|
|
||||||
- [ ] allow duplication of parents' functions in child classes. Not allowed for now due to conflicts in Cython.
|
- [ ] allow duplication of parents' functions in child classes. Not allowed for now due to conflicts in Cython.
|
||||||
- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
|
- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?)
|
||||||
- [ ] inner namespaces ==> inner packages?
|
- [ ] inner namespaces ==> inner packages?
|
||||||
- [ ] Wrap fixed-size Matrices/Vectors?
|
- [ ] Wrap fixed-size Matrices/Vectors?
|
||||||
|
|
||||||
|
|
||||||
### Completed/Cancelled:
|
### Completed/Cancelled:
|
||||||
|
|
||||||
- [x] Fix Python tests: don't use " import <package> * ": Bad style!!! (18-03-17 19:50)
|
- [x] Fix Python tests: don't use " import <package> \* ": Bad style!!! (18-03-17 19:50)
|
||||||
- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
|
- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
|
||||||
- [x] Wrap unstable @done (18-03-17 15:30)
|
- [x] Wrap unstable @done (18-03-17 15:30)
|
||||||
- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
|
- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
|
||||||
- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
|
- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
|
||||||
- [x] 06-03-17: manage to remove the requirements for default and copy constructors
|
- [x] 06-03-17: manage to remove the requirements for default and copy constructors
|
||||||
- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython ignore Key, as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
|
- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython ignore Key, as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
|
||||||
- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however.
|
- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `gtsam::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however.
|
||||||
- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
|
- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
|
||||||
- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
|
- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
|
||||||
- [x] CMake install script @done (25-11-16 02:30)
|
- [x] CMake install script @done (25-11-16 02:30)
|
||||||
|
@ -132,7 +124,7 @@ wrap_and_install_library_cython("your_project_interface.h"
|
||||||
- [x] Casting from parent and grandparents @done (16-11-16 17:00)
|
- [x] Casting from parent and grandparents @done (16-11-16 17:00)
|
||||||
- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
|
- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
|
||||||
- [x] Support "print obj" @done (16-11-16 17:00)
|
- [x] Support "print obj" @done (16-11-16 17:00)
|
||||||
- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
|
- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
|
||||||
- [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
|
- [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
|
||||||
- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
|
- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
|
||||||
- [x] [Nice to have] parse typedef @done (16-09-13 17:19)
|
- [x] [Nice to have] parse typedef @done (16-09-13 17:19)
|
||||||
|
|
|
@ -0,0 +1,56 @@
|
||||||
|
"""
|
||||||
|
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||||
|
Atlanta, Georgia 30332-0415
|
||||||
|
All Rights Reserved
|
||||||
|
|
||||||
|
See LICENSE for the license information
|
||||||
|
|
||||||
|
FrobeniusFactor unit tests.
|
||||||
|
Author: Frank Dellaert
|
||||||
|
"""
|
||||||
|
# pylint: disable=no-name-in-module, import-error, invalid-name
|
||||||
|
import unittest
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
|
||||||
|
FrobeniusWormholeFactor, SOn)
|
||||||
|
|
||||||
|
id = SO4()
|
||||||
|
v1 = np.array([0, 0, 0, 0.1, 0, 0])
|
||||||
|
Q1 = SO4.Expmap(v1)
|
||||||
|
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
|
||||||
|
Q2 = SO4.Expmap(v2)
|
||||||
|
|
||||||
|
|
||||||
|
class TestFrobeniusFactorSO4(unittest.TestCase):
|
||||||
|
"""Test FrobeniusFactor factors."""
|
||||||
|
|
||||||
|
def test_frobenius_factor(self):
|
||||||
|
"""Test creation of a factor that calculates the Frobenius norm."""
|
||||||
|
factor = FrobeniusFactorSO4(1, 2)
|
||||||
|
actual = factor.evaluateError(Q1, Q2)
|
||||||
|
expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,))
|
||||||
|
np.testing.assert_array_equal(actual, expected)
|
||||||
|
|
||||||
|
def test_frobenius_between_factor(self):
|
||||||
|
"""Test creation of a Frobenius BetweenFactor."""
|
||||||
|
factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2))
|
||||||
|
actual = factor.evaluateError(Q1, Q2)
|
||||||
|
expected = np.zeros((16,))
|
||||||
|
np.testing.assert_array_almost_equal(actual, expected)
|
||||||
|
|
||||||
|
def test_frobenius_wormhole_factor(self):
|
||||||
|
"""Test creation of a factor that calculates Shonan error."""
|
||||||
|
R1 = SO3.Expmap(v1[3:])
|
||||||
|
R2 = SO3.Expmap(v2[3:])
|
||||||
|
factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
|
||||||
|
I4 = SOn(4)
|
||||||
|
Q1 = I4.retract(v1)
|
||||||
|
Q2 = I4.retract(v2)
|
||||||
|
actual = factor.evaluateError(Q1, Q2)
|
||||||
|
expected = np.zeros((12,))
|
||||||
|
np.testing.assert_array_almost_equal(actual, expected, decimal=4)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
|
@ -17,7 +17,8 @@ import unittest
|
||||||
import gtsam
|
import gtsam
|
||||||
from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
|
from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
|
||||||
GaussNewtonParams, LevenbergMarquardtOptimizer,
|
GaussNewtonParams, LevenbergMarquardtOptimizer,
|
||||||
LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
|
LevenbergMarquardtParams, PCGSolverParameters,
|
||||||
|
DummyPreconditionerParameters, NonlinearFactorGraph, Ordering,
|
||||||
Point2, PriorFactorPoint2, Values)
|
Point2, PriorFactorPoint2, Values)
|
||||||
from gtsam.utils.test_case import GtsamTestCase
|
from gtsam.utils.test_case import GtsamTestCase
|
||||||
|
|
||||||
|
@ -61,6 +62,16 @@ class TestScenario(GtsamTestCase):
|
||||||
fg, initial_values, lmParams).optimize()
|
fg, initial_values, lmParams).optimize()
|
||||||
self.assertAlmostEqual(0, fg.error(actual2))
|
self.assertAlmostEqual(0, fg.error(actual2))
|
||||||
|
|
||||||
|
# Levenberg-Marquardt
|
||||||
|
lmParams = LevenbergMarquardtParams.CeresDefaults()
|
||||||
|
lmParams.setLinearSolverType("ITERATIVE")
|
||||||
|
cgParams = PCGSolverParameters()
|
||||||
|
cgParams.setPreconditionerParams(DummyPreconditionerParameters())
|
||||||
|
lmParams.setIterativeParams(cgParams)
|
||||||
|
actual2 = LevenbergMarquardtOptimizer(
|
||||||
|
fg, initial_values, lmParams).optimize()
|
||||||
|
self.assertAlmostEqual(0, fg.error(actual2))
|
||||||
|
|
||||||
# Dogleg
|
# Dogleg
|
||||||
dlParams = DoglegParams()
|
dlParams = DoglegParams()
|
||||||
dlParams.setOrdering(ordering)
|
dlParams.setOrdering(ordering)
|
||||||
|
|
|
@ -4,6 +4,12 @@ Author: Jing Wu and Frank Dellaert
|
||||||
"""
|
"""
|
||||||
# pylint: disable=invalid-name
|
# pylint: disable=invalid-name
|
||||||
|
|
||||||
|
import sys
|
||||||
|
if sys.version_info.major >= 3:
|
||||||
|
from io import StringIO
|
||||||
|
else:
|
||||||
|
from cStringIO import StringIO
|
||||||
|
|
||||||
import unittest
|
import unittest
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
|
@ -37,11 +43,24 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
self.optimizer = gtsam.GaussNewtonOptimizer(
|
self.optimizer = gtsam.GaussNewtonOptimizer(
|
||||||
graph, initial, self.params)
|
graph, initial, self.params)
|
||||||
|
|
||||||
|
self.lmparams = gtsam.LevenbergMarquardtParams()
|
||||||
|
self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
|
||||||
|
graph, initial, self.lmparams
|
||||||
|
)
|
||||||
|
|
||||||
|
# setup output capture
|
||||||
|
self.capturedOutput = StringIO()
|
||||||
|
sys.stdout = self.capturedOutput
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
"""Reset print capture."""
|
||||||
|
sys.stdout = sys.__stdout__
|
||||||
|
|
||||||
def test_simple_printing(self):
|
def test_simple_printing(self):
|
||||||
"""Test with a simple hook."""
|
"""Test with a simple hook."""
|
||||||
|
|
||||||
# Provide a hook that just prints
|
# Provide a hook that just prints
|
||||||
def hook(_, error: float):
|
def hook(_, error):
|
||||||
print(error)
|
print(error)
|
||||||
|
|
||||||
# Only thing we require from optimizer is an iterate method
|
# Only thing we require from optimizer is an iterate method
|
||||||
|
@ -51,6 +70,16 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
actual = self.optimizer.values()
|
actual = self.optimizer.values()
|
||||||
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
||||||
|
|
||||||
|
def test_lm_simple_printing(self):
|
||||||
|
"""Make sure we are properly terminating LM"""
|
||||||
|
def hook(_, error):
|
||||||
|
print(error)
|
||||||
|
|
||||||
|
gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
|
||||||
|
|
||||||
|
actual = self.lmoptimizer.values()
|
||||||
|
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
|
||||||
|
|
||||||
@unittest.skip("Not a test we want run every time, as needs comet.ml account")
|
@unittest.skip("Not a test we want run every time, as needs comet.ml account")
|
||||||
def test_comet(self):
|
def test_comet(self):
|
||||||
"""Test with a comet hook."""
|
"""Test with a comet hook."""
|
||||||
|
@ -65,7 +94,7 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
+ str(time.hour)+":"+str(time.minute)+":"+str(time.second))
|
+ str(time.hour)+":"+str(time.minute)+":"+str(time.second))
|
||||||
|
|
||||||
# I want to do some comet thing here
|
# I want to do some comet thing here
|
||||||
def hook(optimizer, error: float):
|
def hook(optimizer, error):
|
||||||
comet.log_metric("Karcher error",
|
comet.log_metric("Karcher error",
|
||||||
error, optimizer.iterations())
|
error, optimizer.iterations())
|
||||||
|
|
||||||
|
@ -76,4 +105,4 @@ class TestOptimizeComet(GtsamTestCase):
|
||||||
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected)
|
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|
|
@ -4,15 +4,11 @@ Author: Jing Wu and Frank Dellaert
|
||||||
"""
|
"""
|
||||||
# pylint: disable=invalid-name
|
# pylint: disable=invalid-name
|
||||||
|
|
||||||
from typing import TypeVar
|
|
||||||
|
|
||||||
from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
|
from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
|
||||||
import gtsam
|
import gtsam
|
||||||
|
|
||||||
T = TypeVar('T')
|
|
||||||
|
|
||||||
|
def optimize(optimizer, check_convergence, hook):
|
||||||
def optimize(optimizer: T, check_convergence, hook):
|
|
||||||
""" Given an optimizer and a convergence check, iterate until convergence.
|
""" Given an optimizer and a convergence check, iterate until convergence.
|
||||||
After each iteration, hook(optimizer, error) is called.
|
After each iteration, hook(optimizer, error) is called.
|
||||||
After the function, use values and errors to get the result.
|
After the function, use values and errors to get the result.
|
||||||
|
@ -36,8 +32,8 @@ def optimize(optimizer: T, check_convergence, hook):
|
||||||
current_error = new_error
|
current_error = new_error
|
||||||
|
|
||||||
|
|
||||||
def gtsam_optimize(optimizer: NonlinearOptimizer,
|
def gtsam_optimize(optimizer,
|
||||||
params: NonlinearOptimizerParams,
|
params,
|
||||||
hook):
|
hook):
|
||||||
""" Given an optimizer and params, iterate until convergence.
|
""" Given an optimizer and params, iterate until convergence.
|
||||||
After each iteration, hook(optimizer) is called.
|
After each iteration, hook(optimizer) is called.
|
||||||
|
@ -50,5 +46,7 @@ def gtsam_optimize(optimizer: NonlinearOptimizer,
|
||||||
def check_convergence(optimizer, current_error, new_error):
|
def check_convergence(optimizer, current_error, new_error):
|
||||||
return (optimizer.iterations() >= params.getMaxIterations()) or (
|
return (optimizer.iterations() >= params.getMaxIterations()) or (
|
||||||
gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(),
|
gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(),
|
||||||
current_error, new_error))
|
current_error, new_error)) or (
|
||||||
|
isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound())
|
||||||
optimize(optimizer, check_convergence, hook)
|
optimize(optimizer, check_convergence, hook)
|
||||||
|
return optimizer.values()
|
||||||
|
|
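Since `gtsam_optimize` now returns the optimizer's values, here is a hedged usage sketch. The import path of the helper is an assumption based on its unit test, and the factor/noise-model names follow the wrapper's inner-namespace prefixing described in the README:

```python
import gtsam
# assumed module location of the helper defined above
from gtsam.utils.logging_optimizer import gtsam_optimize

graph = gtsam.NonlinearFactorGraph()
noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)
graph.add(gtsam.PriorFactorPoint2(1, gtsam.Point2(0, 0), noise))

initial = gtsam.Values()
initial.insert(1, gtsam.Point2(1.0, 2.0))

params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)

def hook(_, error):
    print(error)          # called after every iteration

result = gtsam_optimize(optimizer, params, hook)
print(result.atPoint2(1))
```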
|
@ -4,11 +4,11 @@ include(GtsamCythonWrap)
|
||||||
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
|
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
|
||||||
# and eigency's cython pxd headers can be found when cythonizing gtsam
|
# and eigency's cython pxd headers can be found when cythonizing gtsam
|
||||||
file(COPY "." DESTINATION ".")
|
file(COPY "." DESTINATION ".")
|
||||||
set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
|
set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
|
||||||
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
|
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
|
||||||
|
|
||||||
# This is to make the build/cython/gtsam_eigency folder a python package
|
# This is to make the build/cython/gtsam_eigency folder a python package
|
||||||
configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
|
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
|
||||||
|
|
||||||
# include eigency headers
|
# include eigency headers
|
||||||
include_directories(${EIGENCY_INCLUDE_DIR})
|
include_directories(${EIGENCY_INCLUDE_DIR})
|
||||||
|
@ -16,8 +16,8 @@ include_directories(${EIGENCY_INCLUDE_DIR})
|
||||||
# Cythonize and build eigency
|
# Cythonize and build eigency
|
||||||
message(STATUS "Cythonize and build eigency")
|
message(STATUS "Cythonize and build eigency")
|
||||||
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
|
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
|
||||||
# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
|
# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions()
|
||||||
# in conversions_api.h correctly!!!
|
# in conversions_api.h correctly!
|
||||||
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
|
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
|
||||||
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||||
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
||||||
|
@ -37,13 +37,6 @@ add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
|
||||||
add_custom_target(cythonize_eigency)
|
add_custom_target(cythonize_eigency)
|
||||||
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
||||||
|
|
||||||
# install
|
if(TARGET ${python_install_target})
|
||||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
add_dependencies(${python_install_target} cythonize_eigency)
|
||||||
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}"
|
endif()
|
||||||
PATTERN "CMakeLists.txt" EXCLUDE
|
|
||||||
PATTERN "__init__.py.in" EXCLUDE)
|
|
||||||
install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
|
|
||||||
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency")
|
|
||||||
install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
|
|
||||||
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
|
|
||||||
install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
|
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
Cython>=0.25.2
|
Cython>=0.25.2
|
||||||
backports_abc>=0.5
|
backports_abc>=0.5
|
||||||
numpy>=1.12.0
|
numpy>=1.11.0
|
||||||
|
|
|
@ -7,6 +7,22 @@ except ImportError:
|
||||||
|
|
||||||
packages = find_packages()
|
packages = find_packages()
|
||||||
|
|
||||||
|
package_data = {
|
||||||
|
package:
|
||||||
|
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
|
||||||
|
for package in packages
|
||||||
|
}
|
||||||
|
|
||||||
|
cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines()
|
||||||
|
|
||||||
|
install_requires = [line.strip() \
|
||||||
|
for line in cython_install_requirements \
|
||||||
|
if len(line.strip()) > 0 and not line.strip().startswith('#')
|
||||||
|
]
|
||||||
|
|
||||||
|
# Cleaner to read in the contents rather than copy them over.
|
||||||
|
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read()
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='gtsam',
|
name='gtsam',
|
||||||
description='Georgia Tech Smoothing And Mapping library',
|
description='Georgia Tech Smoothing And Mapping library',
|
||||||
|
@ -16,7 +32,7 @@ setup(
|
||||||
author_email='frank.dellaert@gtsam.org',
|
author_email='frank.dellaert@gtsam.org',
|
||||||
license='Simplified BSD license',
|
license='Simplified BSD license',
|
||||||
keywords='slam sam robotics localization mapping optimization',
|
keywords='slam sam robotics localization mapping optimization',
|
||||||
long_description='''${README_CONTENTS}''',
|
long_description=readme_contents,
|
||||||
long_description_content_type='text/markdown',
|
long_description_content_type='text/markdown',
|
||||||
python_requires='>=2.7',
|
python_requires='>=2.7',
|
||||||
# https://pypi.org/pypi?%3Aaction=list_classifiers
|
# https://pypi.org/pypi?%3Aaction=list_classifiers
|
||||||
|
@ -34,11 +50,6 @@ setup(
|
||||||
],
|
],
|
||||||
|
|
||||||
packages=packages,
|
packages=packages,
|
||||||
package_data={package:
|
package_data=package_data,
|
||||||
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
|
install_requires=install_requires
|
||||||
for package in packages
|
|
||||||
},
|
|
||||||
install_requires=[line.strip() for line in '''
|
|
||||||
${CYTHON_INSTALL_REQUIREMENTS}
|
|
||||||
'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,12 +0,0 @@
# How to build a GTSAM debian package

To use the ``debuild`` command, install the ``devscripts`` package

    sudo apt install devscripts

Change into the gtsam directory, then run:

    debuild -us -uc -j4

Adjust the ``-j4`` depending on how many CPUs you want to build on in
parallel.

@ -1,5 +0,0 @@
gtsam (4.0.0-1berndpfrommer) bionic; urgency=medium

  * initial release

 -- Bernd Pfrommer <bernd.pfrommer@gmail.com>  Wed, 18 Jul 2018 20:36:44 -0400

@ -1 +0,0 @@
9

@ -1,15 +0,0 @@
Source: gtsam
Section: libs
Priority: optional
Maintainer: Frank Dellaert <frank@cc.gatech.edu>
Uploaders: Jose Luis Blanco Claraco <joseluisblancoc@gmail.com>, Bernd Pfrommer <bernd.pfrommer@gmail.com>
Build-Depends: cmake, libboost-all-dev (>= 1.58), libeigen3-dev, libtbb-dev, debhelper (>=9)
Standards-Version: 3.9.7
Homepage: https://github.com/borglab/gtsam
Vcs-Browser: https://github.com/borglab/gtsam

Package: libgtsam-dev
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}, libboost-serialization-dev, libboost-system-dev, libboost-filesystem-dev, libboost-thread-dev, libboost-program-options-dev, libboost-date-time-dev, libboost-timer-dev, libboost-chrono-dev, libboost-regex-dev
Description: Georgia Tech Smoothing and Mapping Library
 gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications

@ -1,15 +0,0 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: gtsam
Source: https://bitbucket.org/gtborg/gtsam.git

Files: *
Copyright: 2017, Frank Dellaert
License: BSD

Files: gtsam/3rdparty/CCOLAMD/*
Copyright: 2005-2011, Univ. of Florida. Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert. http://www.cise.ufl.edu/research/sparse
License: GNU LESSER GENERAL PUBLIC LICENSE

Files: gtsam/3rdparty/Eigen/*
Copyright: 2017, Multiple Authors
License: MPL2

@ -1,29 +0,0 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
export DH_VERBOSE = 1

# Makefile target name for running unit tests:
GTSAM_TEST_TARGET = check

# see FEATURE AREAS in dpkg-buildflags(1)
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all

# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
# package maintainers to append LDFLAGS
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed

%:
	dh $@ --parallel

# dh_make generated override targets
# This is example for Cmake (See https://bugs.debian.org/641051 )
override_dh_auto_configure:
	dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=ON -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF

override_dh_auto_test-arch:
	# Tests for arch-dependent :
	echo "[override_dh_auto_test-arch]"
	dh_auto_build -O--buildsystem=cmake -- $(GTSAM_TEST_TARGET)

@ -1 +0,0 @@
3.0 (quilt)

@ -2291,15 +2291,11 @@ uncalibration
|
||||||
used in the residual).
|
used in the residual).
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\begin_layout Standard
|
|
||||||
\begin_inset Note Note
|
|
||||||
status collapsed
|
|
||||||
|
|
||||||
\begin_layout Section
|
\begin_layout Section
|
||||||
Noise models of prior factors
|
Noise models of prior factors
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\begin_layout Plain Layout
|
\begin_layout Standard
|
||||||
The simplest way to describe noise models is by an example.
|
The simplest way to describe noise models is by an example.
|
||||||
Let's take a prior factor on a 3D pose
|
Let's take a prior factor on a 3D pose
|
||||||
\begin_inset Formula $x\in\SE 3$
|
\begin_inset Formula $x\in\SE 3$
|
||||||
|
@ -2353,7 +2349,7 @@ e\left(x\right)=\norm{h\left(x\right)}_{\Sigma}^{2}=h\left(x\right)^{\t}\Sigma^{
|
||||||
useful answer out quickly ]
|
useful answer out quickly ]
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\begin_layout Plain Layout
|
\begin_layout Standard
|
||||||
The density induced by a noise model on the prior factor is Gaussian in
|
The density induced by a noise model on the prior factor is Gaussian in
|
||||||
the tangent space about the linearization point.
|
the tangent space about the linearization point.
|
||||||
Suppose that the pose is linearized at
|
Suppose that the pose is linearized at
|
||||||
|
@ -2431,7 +2427,7 @@ Here we see that the update
|
||||||
.
|
.
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\begin_layout Plain Layout
|
\begin_layout Standard
|
||||||
This means that to draw random pose samples, we actually draw random samples
|
This means that to draw random pose samples, we actually draw random samples
|
||||||
of
|
of
|
||||||
\begin_inset Formula $\delta x$
|
\begin_inset Formula $\delta x$
|
||||||
|
@ -2456,7 +2452,7 @@ This means that to draw random pose samples, we actually draw random samples
|
||||||
Noise models of between factors
|
Noise models of between factors
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\begin_layout Plain Layout
|
\begin_layout Standard
|
||||||
The noise model of a BetweenFactor is a bit more complicated.
|
The noise model of a BetweenFactor is a bit more complicated.
|
||||||
The unwhitened error is
|
The unwhitened error is
|
||||||
\begin_inset Formula
|
\begin_inset Formula
|
||||||
|
@ -2516,11 +2512,6 @@ e\left(\delta x_{1}\right) & \approx\norm{\log\left(z^{-1}\left(x_{1}\exp\delta
|
||||||
\end_inset
|
\end_inset
|
||||||
|
|
||||||
|
|
||||||
\end_layout
|
|
||||||
|
|
||||||
\end_inset
|
|
||||||
|
|
||||||
|
|
||||||
\end_layout
|
\end_layout
|
||||||
|
|
||||||
\end_body
|
\end_body
|
||||||
|
|
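Side note on the noise-model discussion edited above (not part of the commit): a minimal GTSAM sketch of a prior factor and a between factor on a 3D pose, each with a diagonal noise model specified in the 6-dof tangent space (rotation sigmas first, then translation). The keys, sigmas, and the measured relative pose below are made up for illustration only.

```cpp
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>

using namespace gtsam;

int main() {
  NonlinearFactorGraph graph;

  // Prior on pose 0: 0.1 rad on roll/pitch/yaw, 0.3 m on x/y/z (hypothetical values)
  auto priorNoise = noiseModel::Diagonal::Sigmas(
      (Vector(6) << 0.1, 0.1, 0.1, 0.3, 0.3, 0.3).finished());
  graph.emplace_shared<PriorFactor<Pose3>>(0, Pose3(), priorNoise);

  // Relative-pose measurement between poses 0 and 1, with its own tangent-space model
  auto betweenNoise = noiseModel::Diagonal::Sigmas(
      (Vector(6) << 0.05, 0.05, 0.05, 0.1, 0.1, 0.1).finished());
  graph.emplace_shared<BetweenFactor<Pose3>>(
      0, 1, Pose3(Rot3::RzRyRx(0.0, 0.0, 0.1), Point3(1, 0, 0)), betweenNoise);

  graph.print("factor graph:\n");
  return 0;
}
```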
Binary file not shown.
Binary file not shown.
@ -0,0 +1,21 @@
# Instructions

Build all docker images, in order:

```bash
(cd ubuntu-boost-tbb && ./build.sh)
(cd ubuntu-gtsam && ./build.sh)
(cd ubuntu-gtsam-python && ./build.sh)
(cd ubuntu-gtsam-python-vnc && ./build.sh)
```

Then launch with:

    docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic

Then open a remote VNC X client, for example:

    sudo apt-get install tigervnc-viewer
    xtigervncviewer :5900
@ -1,18 +0,0 @@
# Get the base Ubuntu image from Docker Hub
FROM ubuntu:bionic

# Update apps on the base image
RUN apt-get -y update && apt-get install -y

# Install C++
RUN apt-get -y install build-essential

# Install boost and cmake
RUN apt-get -y install libboost-all-dev cmake

# Install TBB
RUN apt-get -y install libtbb-dev

# Install latest Eigen
RUN apt-get install -y libeigen3-dev
@ -0,0 +1,19 @@
# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages.

# Get the base Ubuntu image from Docker Hub
FROM ubuntu:bionic

# Disable GUI prompts
ENV DEBIAN_FRONTEND noninteractive

# Update apps on the base image
RUN apt-get -y update && apt-get -y install

# Install C++
RUN apt-get -y install build-essential apt-utils

# Install boost and cmake
RUN apt-get -y install libboost-all-dev cmake

# Install TBB
RUN apt-get -y install libtbb-dev
@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
@ -0,0 +1,20 @@
# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.

# Get the base Ubuntu/GTSAM image from Docker Hub
FROM dellaert/ubuntu-gtsam-python:bionic

# Things needed to get a python GUI
ENV DEBIAN_FRONTEND noninteractive
RUN apt install -y python-tk
RUN python3 -m pip install matplotlib

# Install a VNC X-server, Frame buffer, and windows manager
RUN apt install -y x11vnc xvfb fluxbox

# Finally, install wmctrl needed for bootstrap script
RUN apt install -y wmctrl

# Copy bootstrap script and make sure it runs
COPY bootstrap.sh /

CMD '/bootstrap.sh'
@ -0,0 +1,111 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb
|
||||||
|
|
||||||
|
main() {
|
||||||
|
log_i "Starting xvfb virtual display..."
|
||||||
|
launch_xvfb
|
||||||
|
log_i "Starting window manager..."
|
||||||
|
launch_window_manager
|
||||||
|
log_i "Starting VNC server..."
|
||||||
|
run_vnc_server
|
||||||
|
}
|
||||||
|
|
||||||
|
launch_xvfb() {
|
||||||
|
local xvfbLockFilePath="/tmp/.X1-lock"
|
||||||
|
if [ -f "${xvfbLockFilePath}" ]
|
||||||
|
then
|
||||||
|
log_i "Removing xvfb lock file '${xvfbLockFilePath}'..."
|
||||||
|
if ! rm -v "${xvfbLockFilePath}"
|
||||||
|
then
|
||||||
|
log_e "Failed to remove xvfb lock file"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set defaults if the user did not specify envs.
|
||||||
|
export DISPLAY=${XVFB_DISPLAY:-:1}
|
||||||
|
local screen=${XVFB_SCREEN:-0}
|
||||||
|
local resolution=${XVFB_RESOLUTION:-1280x960x24}
|
||||||
|
local timeout=${XVFB_TIMEOUT:-5}
|
||||||
|
|
||||||
|
# Start and wait for either Xvfb to be fully up or we hit the timeout.
|
||||||
|
Xvfb ${DISPLAY} -screen ${screen} ${resolution} &
|
||||||
|
local loopCount=0
|
||||||
|
until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1
|
||||||
|
do
|
||||||
|
loopCount=$((loopCount+1))
|
||||||
|
sleep 1
|
||||||
|
if [ ${loopCount} -gt ${timeout} ]
|
||||||
|
then
|
||||||
|
log_e "xvfb failed to start"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
launch_window_manager() {
|
||||||
|
local timeout=${XVFB_TIMEOUT:-5}
|
||||||
|
|
||||||
|
# Start and wait for either fluxbox to be fully up or we hit the timeout.
|
||||||
|
fluxbox &
|
||||||
|
local loopCount=0
|
||||||
|
until wmctrl -m > /dev/null 2>&1
|
||||||
|
do
|
||||||
|
loopCount=$((loopCount+1))
|
||||||
|
sleep 1
|
||||||
|
if [ ${loopCount} -gt ${timeout} ]
|
||||||
|
then
|
||||||
|
log_e "fluxbox failed to start"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
run_vnc_server() {
|
||||||
|
local passwordArgument='-nopw'
|
||||||
|
|
||||||
|
if [ -n "${VNC_SERVER_PASSWORD}" ]
|
||||||
|
then
|
||||||
|
local passwordFilePath="${HOME}/.x11vnc.pass"
|
||||||
|
if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}"
|
||||||
|
then
|
||||||
|
log_e "Failed to store x11vnc password"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
passwordArgument=-"-rfbauth ${passwordFilePath}"
|
||||||
|
log_i "The VNC server will ask for a password"
|
||||||
|
else
|
||||||
|
log_w "The VNC server will NOT ask for a password"
|
||||||
|
fi
|
||||||
|
|
||||||
|
x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} &
|
||||||
|
wait $!
|
||||||
|
}
|
||||||
|
|
||||||
|
log_i() {
|
||||||
|
log "[INFO] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_w() {
|
||||||
|
log "[WARN] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_e() {
|
||||||
|
log "[ERROR] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log() {
|
||||||
|
echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}"
|
||||||
|
}
|
||||||
|
|
||||||
|
control_c() {
|
||||||
|
echo ""
|
||||||
|
exit
|
||||||
|
}
|
||||||
|
|
||||||
|
trap control_c SIGINT SIGTERM SIGHUP
|
||||||
|
|
||||||
|
main
|
||||||
|
|
||||||
|
exit
|
|
@ -0,0 +1,4 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
@ -0,0 +1,5 @@
# After running this script, connect VNC client to 0.0.0.0:5900
docker run -it \
  --workdir="/usr/src/gtsam" \
  -p 5900:5900 \
  dellaert/ubuntu-gtsam-python-vnc:bionic
@ -0,0 +1,31 @@
|
||||||
|
# GTSAM Ubuntu image with Python wrapper support.
|
||||||
|
|
||||||
|
# Get the base Ubuntu/GTSAM image from Docker Hub
|
||||||
|
FROM dellaert/ubuntu-gtsam:bionic
|
||||||
|
|
||||||
|
# Install pip
|
||||||
|
RUN apt-get install -y python3-pip python3-dev
|
||||||
|
|
||||||
|
# Install python wrapper requirements
|
||||||
|
RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt
|
||||||
|
|
||||||
|
# Run cmake again, now with cython toolbox on
|
||||||
|
WORKDIR /usr/src/gtsam/build
|
||||||
|
RUN cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGTSAM_WITH_EIGEN_MKL=OFF \
|
||||||
|
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TESTS=OFF \
|
||||||
|
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
|
||||||
|
-DGTSAM_PYTHON_VERSION=3\
|
||||||
|
..
|
||||||
|
|
||||||
|
# Build again, as ubuntu-gtsam image cleaned
|
||||||
|
RUN make -j4 install && make clean
|
||||||
|
|
||||||
|
# Needed to run python wrapper:
|
||||||
|
RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc
|
||||||
|
|
||||||
|
# Run bash
|
||||||
|
CMD ["bash"]
|
|
@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
@ -0,0 +1,36 @@
|
||||||
|
# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
|
||||||
|
|
||||||
|
# Get the base Ubuntu image from Docker Hub
|
||||||
|
FROM dellaert/ubuntu-boost-tbb:bionic
|
||||||
|
|
||||||
|
# Install git
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y git
|
||||||
|
|
||||||
|
# Install compiler
|
||||||
|
RUN apt-get install -y build-essential
|
||||||
|
|
||||||
|
# Clone GTSAM (develop branch)
|
||||||
|
WORKDIR /usr/src/
|
||||||
|
RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git
|
||||||
|
|
||||||
|
# Change to build directory. Will be created automatically.
|
||||||
|
WORKDIR /usr/src/gtsam/build
|
||||||
|
# Run cmake
|
||||||
|
RUN cmake \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DGTSAM_WITH_EIGEN_MKL=OFF \
|
||||||
|
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
|
||||||
|
-DGTSAM_BUILD_TESTS=OFF \
|
||||||
|
-DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \
|
||||||
|
..
|
||||||
|
|
||||||
|
# Build
|
||||||
|
RUN make -j4 install && make clean
|
||||||
|
|
||||||
|
# Needed to link with GTSAM
|
||||||
|
RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:LD_LIBRARY_PATH' >> /root/.bashrc
|
||||||
|
|
||||||
|
# Run bash
|
||||||
|
CMD ["bash"]
|
|
@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
@ -1,7 +1,4 @@
 set (excluded_examples
-    DiscreteBayesNet_FG.cpp
-    UGM_chain.cpp
-    UGM_small.cpp
     elaboratePoint2KalmanFilter.cpp
 )

@ -0,0 +1,6 @@
|
||||||
|
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
|
||||||
|
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
|
||||||
|
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
|
||||||
|
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||||
|
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||||
|
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
|
@ -0,0 +1,11 @@
|
||||||
|
VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
|
||||||
|
VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1
|
||||||
|
VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||||
|
VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963
|
||||||
|
VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||||
|
EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||||
|
EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
|
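Not part of the commit: the two pose-graph files added above are in g2o format (VERTEX_SE3:QUAT / EDGE_SE3:QUAT rows). A minimal sketch of loading such a file with GTSAM's dataset utilities follows; the file name is hypothetical and only stands in for wherever these datasets end up being installed.

```cpp
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/dataset.h>

#include <string>
#include <tuple>

using namespace gtsam;

int main() {
  // Hypothetical file name; findExampleDataFile searches the example data shipped with GTSAM.
  std::string file = findExampleDataFile("pose3example.txt");

  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  bool is3D = true;  // the vertices/edges above are SE(3)
  std::tie(graph, initial) = readG2o(file, is3D);

  graph->print("pose graph:\n");
  initial->print("initial values:\n");
  return 0;
}
```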
@ -0,0 +1,83 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file DiscreteBayesNetExample.cpp
|
||||||
|
* @brief Discrete Bayes Net example with famous Asia Bayes Network
|
||||||
|
* @author Frank Dellaert
|
||||||
|
* @date JULY 10, 2020
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
#include <gtsam/inference/BayesNet.h>
|
||||||
|
|
||||||
|
#include <iomanip>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
int main(int argc, char **argv) {
|
||||||
|
DiscreteBayesNet asia;
|
||||||
|
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
|
||||||
|
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
|
||||||
|
asia.add(Asia % "99/1");
|
||||||
|
asia.add(Smoking % "50/50");
|
||||||
|
|
||||||
|
asia.add(Tuberculosis | Asia = "99/1 95/5");
|
||||||
|
asia.add(LungCancer | Smoking = "99/1 90/10");
|
||||||
|
asia.add(Bronchitis | Smoking = "70/30 40/60");
|
||||||
|
|
||||||
|
asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||||
|
|
||||||
|
asia.add(XRay | Either = "95/5 2/98");
|
||||||
|
asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
|
||||||
|
|
||||||
|
// print
|
||||||
|
vector<string> pretty = {"Asia", "Dyspnea", "XRay", "Tuberculosis",
|
||||||
|
"Smoking", "Either", "LungCancer", "Bronchitis"};
|
||||||
|
auto formatter = [pretty](Key key) { return pretty[key]; };
|
||||||
|
asia.print("Asia", formatter);
|
||||||
|
|
||||||
|
// Convert to factor graph
|
||||||
|
DiscreteFactorGraph fg(asia);
|
||||||
|
|
||||||
|
// Create solver and eliminate
|
||||||
|
Ordering ordering;
|
||||||
|
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
|
||||||
|
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
|
||||||
|
|
||||||
|
// solve
|
||||||
|
DiscreteFactor::sharedValues mpe = chordal->optimize();
|
||||||
|
GTSAM_PRINT(*mpe);
|
||||||
|
|
||||||
|
// We can also build a Bayes tree (directed junction tree).
|
||||||
|
// The elimination order above will do fine:
|
||||||
|
auto bayesTree = fg.eliminateMultifrontal(ordering);
|
||||||
|
bayesTree->print("bayesTree", formatter);
|
||||||
|
|
||||||
|
// add evidence, we were in Asia and we have dyspnea
|
||||||
|
fg.add(Asia, "0 1");
|
||||||
|
fg.add(Dyspnea, "0 1");
|
||||||
|
|
||||||
|
// solve again, now with evidence
|
||||||
|
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
||||||
|
DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
|
||||||
|
GTSAM_PRINT(*mpe2);
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t i = 0; i < 10; i++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal2->sample();
|
||||||
|
GTSAM_PRINT(*sample);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
|
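Not part of the commit: the Asia example above includes DiscreteMarginals.h but only runs MPE and sampling. A hedged, self-contained sketch of how marginals could be queried, using a two-node slice of the same network and only calls that appear elsewhere in this diff:

```cpp
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>

#include <iostream>

using namespace std;
using namespace gtsam;

int main() {
  // Same keys/CPTs as the Asia example above, restricted to two variables
  DiscreteKey Asia(0, 2), Tuberculosis(3, 2);

  DiscreteBayesNet bn;
  bn.add(Asia % "99/1");
  bn.add(Tuberculosis | Asia = "99/1 95/5");

  // Convert to a factor graph and query a node marginal
  DiscreteFactorGraph fg(bn);
  DiscreteMarginals marginals(fg);
  Vector probs = marginals.marginalProbabilities(Tuberculosis);
  cout << "P(Tuberculosis): " << probs.transpose() << endl;
  return 0;
}
```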
@ -15,105 +15,106 @@
|
||||||
* @author Abhijit
|
* @author Abhijit
|
||||||
* @date Jun 4, 2012
|
* @date Jun 4, 2012
|
||||||
*
|
*
|
||||||
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529]
|
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009,
|
||||||
* You may be familiar with other graphical model packages like BNT (available
|
* p529] You may be familiar with other graphical model packages like BNT
|
||||||
* at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
|
* (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
|
||||||
* example. The following demo is same as that in the above link, except that
|
* is used as an example. The following demo is same as that in the above link,
|
||||||
* everything is using GTSAM.
|
* except that everything is using GTSAM.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
#include <gtsam/discrete/DiscreteSequentialSolver.h>
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
|
||||||
int main(int argc, char **argv) {
|
int main(int argc, char **argv) {
|
||||||
|
// Define keys and a print function
|
||||||
|
Key C(1), S(2), R(3), W(4);
|
||||||
|
auto print = [=](DiscreteFactor::sharedValues values) {
|
||||||
|
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
|
||||||
|
<< " Sprinkler = " << static_cast<bool>((*values)[S])
|
||||||
|
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
|
||||||
|
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
|
||||||
|
};
|
||||||
|
|
||||||
// We assume binary state variables
|
// We assume binary state variables
|
||||||
// we have 0 == "False" and 1 == "True"
|
// we have 0 == "False" and 1 == "True"
|
||||||
const size_t nrStates = 2;
|
const size_t nrStates = 2;
|
||||||
|
|
||||||
// define variables
|
// define variables
|
||||||
DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates),
|
DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates),
|
||||||
WetGrass(4, nrStates);
|
WetGrass(W, nrStates);
|
||||||
|
|
||||||
// create Factor Graph of the bayes net
|
// create Factor Graph of the bayes net
|
||||||
DiscreteFactorGraph graph;
|
DiscreteFactorGraph graph;
|
||||||
|
|
||||||
// add factors
|
// add factors
|
||||||
graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
|
graph.add(Cloudy, "0.5 0.5"); // P(Cloudy)
|
||||||
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
|
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy)
|
||||||
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
|
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy)
|
||||||
graph.add(Sprinkler & Rain & WetGrass,
|
graph.add(Sprinkler & Rain & WetGrass,
|
||||||
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
|
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain)
|
||||||
|
|
||||||
// Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
|
// Alternatively we can also create a DiscreteBayesNet, add
|
||||||
// factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
|
// DiscreteConditional factors and create a FactorGraph from it. (See
|
||||||
|
// testDiscreteBayesNet.cpp)
|
||||||
|
|
||||||
// Since this is a relatively small distribution, we can as well print
|
// Since this is a relatively small distribution, we can as well print
|
||||||
// the whole distribution..
|
// the whole distribution..
|
||||||
cout << "Distribution of Example: " << endl;
|
cout << "Distribution of Example: " << endl;
|
||||||
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
|
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
|
||||||
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
||||||
<< endl;
|
<< endl;
|
||||||
for (size_t a = 0; a < nrStates; a++)
|
for (size_t a = 0; a < nrStates; a++)
|
||||||
for (size_t m = 0; m < nrStates; m++)
|
for (size_t m = 0; m < nrStates; m++)
|
||||||
for (size_t h = 0; h < nrStates; h++)
|
for (size_t h = 0; h < nrStates; h++)
|
||||||
for (size_t c = 0; c < nrStates; c++) {
|
for (size_t c = 0; c < nrStates; c++) {
|
||||||
DiscreteFactor::Values values;
|
DiscreteFactor::Values values;
|
||||||
values[Cloudy.first] = c;
|
values[C] = c;
|
||||||
values[Sprinkler.first] = h;
|
values[S] = h;
|
||||||
values[Rain.first] = m;
|
values[R] = m;
|
||||||
values[WetGrass.first] = a;
|
values[W] = a;
|
||||||
double prodPot = graph(values);
|
double prodPot = graph(values);
|
||||||
cout << boolalpha << setw(8) << (bool) c << setw(14)
|
cout << setw(8) << static_cast<bool>(c) << setw(14)
|
||||||
<< (bool) h << setw(12) << (bool) m << setw(13)
|
<< static_cast<bool>(h) << setw(12) << static_cast<bool>(m)
|
||||||
<< (bool) a << setw(16) << prodPot << endl;
|
<< setw(13) << static_cast<bool>(a) << setw(16) << prodPot
|
||||||
|
<< endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// "Most Probable Explanation", i.e., configuration with largest value
|
// "Most Probable Explanation", i.e., configuration with largest value
|
||||||
DiscreteSequentialSolver solver(graph);
|
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
|
||||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
cout << "\nMost Probable Explanation (MPE):" << endl;
|
||||||
cout <<"\nMost Probable Explanation (MPE):" << endl;
|
print(mpe);
|
||||||
cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first]
|
|
||||||
<< " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first]
|
|
||||||
<< " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first]
|
|
||||||
<< " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl;
|
|
||||||
|
|
||||||
|
// "Inference" We show an inference query like: probability that the Sprinkler
|
||||||
|
// was on; given that the grass is wet i.e. P( S | C=0) = ?
|
||||||
|
|
||||||
// "Inference" We show an inference query like: probability that the Sprinkler was on;
|
// add evidence that it is not Cloudy
|
||||||
// given that the grass is wet i.e. P( S | W=1) =?
|
graph.add(Cloudy, "1 0");
|
||||||
cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl;
|
|
||||||
|
|
||||||
// Method 1: we can compute the joint marginal P(S,W) and from that we can compute
|
// solve again, now with evidence
|
||||||
// P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
|
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||||
|
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
|
||||||
|
|
||||||
//Step1: Compute P(S,W)
|
cout << "\nMPE given C=0:" << endl;
|
||||||
DiscreteFactorGraph jointFG;
|
print(mpe_with_evidence);
|
||||||
jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices());
|
|
||||||
DecisionTreeFactor probSW = jointFG.product();
|
|
||||||
|
|
||||||
//Step2: Compute P(W)
|
// we can also calculate arbitrary marginals:
|
||||||
DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first);
|
DiscreteMarginals marginals(graph);
|
||||||
|
cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
|
||||||
//Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
|
<< endl;
|
||||||
DiscreteFactor::Values values;
|
cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
|
||||||
values[WetGrass.first] = 1;
|
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
|
||||||
|
<< endl;
|
||||||
//print P(S=0|W=1)
|
|
||||||
values[Sprinkler.first] = 0;
|
|
||||||
cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
|
||||||
|
|
||||||
//print P(S=1|W=1)
|
|
||||||
values[Sprinkler.first] = 1;
|
|
||||||
cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
|
||||||
|
|
||||||
// TODO: Method 2 : One way is to modify the factor graph to
|
|
||||||
// incorporate the evidence node and compute the marginal
|
|
||||||
// TODO: graph.addEvidence(Cloudy,0);
|
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t i = 0; i < 10; i++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal->sample();
|
||||||
|
print(sample);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
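Not part of the commit: a quick hand check of the marginals printed at the end of the modified sprinkler example, taking the factor strings above at face value (the last WetGrass row is used as written, i.e. roughly 0.99). With the evidence factor graph.add(Cloudy, "1 0") fixing C = 0:

```latex
\begin{align*}
P(S{=}1 \mid C{=}0) &= 0.5, \qquad P(R{=}0 \mid C{=}0) = 0.8,\\
P(W{=}1 \mid C{=}0) &= \sum_{s,r} P(s \mid C{=}0)\, P(r \mid C{=}0)\, P(W{=}1 \mid s,r)\\
 &\approx 0.5\cdot 0.8\cdot 0 \;+\; 0.5\cdot 0.2\cdot 0.9 \;+\; 0.5\cdot 0.8\cdot 0.9 \;+\; 0.5\cdot 0.2\cdot 0.99 \;\approx\; 0.55 .
\end{align*}
```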
@ -0,0 +1,94 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file DiscreteBayesNetExample.cpp
|
||||||
|
* @brief Hidden Markov Model example, discrete.
|
||||||
|
* @author Frank Dellaert
|
||||||
|
* @date July 12, 2020
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||||
|
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||||
|
#include <gtsam/inference/BayesNet.h>
|
||||||
|
|
||||||
|
#include <iomanip>
|
||||||
|
#include <sstream>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
int main(int argc, char **argv) {
|
||||||
|
const int nrNodes = 4;
|
||||||
|
const size_t nrStates = 3;
|
||||||
|
|
||||||
|
// Define variables as well as ordering
|
||||||
|
Ordering ordering;
|
||||||
|
vector<DiscreteKey> keys;
|
||||||
|
for (int k = 0; k < nrNodes; k++) {
|
||||||
|
DiscreteKey key_i(k, nrStates);
|
||||||
|
keys.push_back(key_i);
|
||||||
|
ordering.emplace_back(k);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create HMM as a DiscreteBayesNet
|
||||||
|
DiscreteBayesNet hmm;
|
||||||
|
|
||||||
|
// Define backbone
|
||||||
|
const string transition = "8/1/1 1/8/1 1/1/8";
|
||||||
|
for (int k = 1; k < nrNodes; k++) {
|
||||||
|
hmm.add(keys[k] | keys[k - 1] = transition);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add some measurements, not needed for all time steps!
|
||||||
|
hmm.add(keys[0] % "7/2/1");
|
||||||
|
hmm.add(keys[1] % "1/9/0");
|
||||||
|
hmm.add(keys.back() % "5/4/1");
|
||||||
|
|
||||||
|
// print
|
||||||
|
hmm.print("HMM");
|
||||||
|
|
||||||
|
// Convert to factor graph
|
||||||
|
DiscreteFactorGraph factorGraph(hmm);
|
||||||
|
|
||||||
|
// Create solver and eliminate
|
||||||
|
// This will create a DAG ordered with arrow of time reversed
|
||||||
|
DiscreteBayesNet::shared_ptr chordal =
|
||||||
|
factorGraph.eliminateSequential(ordering);
|
||||||
|
chordal->print("Eliminated");
|
||||||
|
|
||||||
|
// solve
|
||||||
|
DiscreteFactor::sharedValues mpe = chordal->optimize();
|
||||||
|
GTSAM_PRINT(*mpe);
|
||||||
|
|
||||||
|
// We can also sample from it
|
||||||
|
cout << "\n10 samples:" << endl;
|
||||||
|
for (size_t k = 0; k < 10; k++) {
|
||||||
|
DiscreteFactor::sharedValues sample = chordal->sample();
|
||||||
|
GTSAM_PRINT(*sample);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or compute the marginals. This re-eliminates the FG into a Bayes tree
|
||||||
|
cout << "\nComputing Node Marginals .." << endl;
|
||||||
|
DiscreteMarginals marginals(factorGraph);
|
||||||
|
for (int k = 0; k < nrNodes; k++) {
|
||||||
|
Vector margProbs = marginals.marginalProbabilities(keys[k]);
|
||||||
|
stringstream ss;
|
||||||
|
ss << "marginal " << k;
|
||||||
|
print(margProbs, ss.str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
|
||||||
|
// joints efficiently, by the Bayes tree shortcut magic. All the code is there
|
||||||
|
// but it's not yet connected.
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
|
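Not part of the commit: for readers unfamiliar with the "/" signature strings, the backbone spec "8/1/1 1/8/1 1/1/8" in the HMM example above corresponds, assuming the usual GTSAM convention that each "/"-separated group is normalized per parent state, to the transition matrix

```latex
P(x_k \mid x_{k-1}) =
\begin{pmatrix}
0.8 & 0.1 & 0.1\\
0.1 & 0.8 & 0.1\\
0.1 & 0.1 & 0.8
\end{pmatrix},
\qquad \text{rows indexed by } x_{k-1},\ \text{columns by } x_k .
```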
@ -0,0 +1,359 @@
|
||||||
|
/* ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||||
|
* Atlanta, Georgia 30332-0415
|
||||||
|
* All Rights Reserved
|
||||||
|
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||||
|
|
||||||
|
* See LICENSE for the license information
|
||||||
|
|
||||||
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @file IMUKittiExampleGPS
|
||||||
|
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||||
|
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
|
||||||
|
*/
|
||||||
|
|
||||||
|
// GTSAM related includes.
|
||||||
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
|
#include <gtsam/navigation/GPSFactor.h>
|
||||||
|
#include <gtsam/navigation/ImuFactor.h>
|
||||||
|
#include <gtsam/slam/dataset.h>
|
||||||
|
#include <gtsam/slam/BetweenFactor.h>
|
||||||
|
#include <gtsam/slam/PriorFactor.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2Params.h>
|
||||||
|
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||||
|
#include <gtsam/inference/Symbol.h>
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <fstream>
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
using namespace gtsam;
|
||||||
|
|
||||||
|
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||||
|
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||||
|
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||||
|
|
||||||
|
struct KittiCalibration {
|
||||||
|
double body_ptx;
|
||||||
|
double body_pty;
|
||||||
|
double body_ptz;
|
||||||
|
double body_prx;
|
||||||
|
double body_pry;
|
||||||
|
double body_prz;
|
||||||
|
double accelerometer_sigma;
|
||||||
|
double gyroscope_sigma;
|
||||||
|
double integration_sigma;
|
||||||
|
double accelerometer_bias_sigma;
|
||||||
|
double gyroscope_bias_sigma;
|
||||||
|
double average_delta_t;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ImuMeasurement {
|
||||||
|
double time;
|
||||||
|
double dt;
|
||||||
|
Vector3 accelerometer;
|
||||||
|
Vector3 gyroscope; // omega
|
||||||
|
};
|
||||||
|
|
||||||
|
struct GpsMeasurement {
|
||||||
|
double time;
|
||||||
|
Vector3 position; // x,y,z
|
||||||
|
};
|
||||||
|
|
||||||
|
const string output_filename = "IMUKittiExampleGPSResults.csv";
|
||||||
|
|
||||||
|
void loadKittiData(KittiCalibration& kitti_calibration,
|
||||||
|
vector<ImuMeasurement>& imu_measurements,
|
||||||
|
vector<GpsMeasurement>& gps_measurements) {
|
||||||
|
string line;
|
||||||
|
|
||||||
|
// Read IMU metadata and compute relative sensor pose transforms
|
||||||
|
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
|
||||||
|
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
|
||||||
|
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||||
|
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||||
|
|
||||||
|
printf("-- Reading sensor metadata\n");
|
||||||
|
|
||||||
|
getline(imu_metadata, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
// Load Kitti calibration
|
||||||
|
getline(imu_metadata, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||||
|
&kitti_calibration.body_ptx,
|
||||||
|
&kitti_calibration.body_pty,
|
||||||
|
&kitti_calibration.body_ptz,
|
||||||
|
&kitti_calibration.body_prx,
|
||||||
|
&kitti_calibration.body_pry,
|
||||||
|
&kitti_calibration.body_prz,
|
||||||
|
&kitti_calibration.accelerometer_sigma,
|
||||||
|
&kitti_calibration.gyroscope_sigma,
|
||||||
|
&kitti_calibration.integration_sigma,
|
||||||
|
&kitti_calibration.accelerometer_bias_sigma,
|
||||||
|
&kitti_calibration.gyroscope_bias_sigma,
|
||||||
|
&kitti_calibration.average_delta_t);
|
||||||
|
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||||
|
kitti_calibration.body_ptx,
|
||||||
|
kitti_calibration.body_pty,
|
||||||
|
kitti_calibration.body_ptz,
|
||||||
|
kitti_calibration.body_prx,
|
||||||
|
kitti_calibration.body_pry,
|
||||||
|
kitti_calibration.body_prz,
|
||||||
|
kitti_calibration.accelerometer_sigma,
|
||||||
|
kitti_calibration.gyroscope_sigma,
|
||||||
|
kitti_calibration.integration_sigma,
|
||||||
|
kitti_calibration.accelerometer_bias_sigma,
|
||||||
|
kitti_calibration.gyroscope_bias_sigma,
|
||||||
|
kitti_calibration.average_delta_t);
|
||||||
|
|
||||||
|
// Read IMU data
|
||||||
|
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||||
|
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
|
||||||
|
printf("-- Reading IMU measurements from file\n");
|
||||||
|
{
|
||||||
|
ifstream imu_data(imu_data_file.c_str());
|
||||||
|
getline(imu_data, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
|
||||||
|
while (!imu_data.eof()) {
|
||||||
|
getline(imu_data, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
|
||||||
|
&time, &dt,
|
||||||
|
&acc_x, &acc_y, &acc_z,
|
||||||
|
&gyro_x, &gyro_y, &gyro_z);
|
||||||
|
|
||||||
|
ImuMeasurement measurement;
|
||||||
|
measurement.time = time;
|
||||||
|
measurement.dt = dt;
|
||||||
|
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
|
||||||
|
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
|
||||||
|
imu_measurements.push_back(measurement);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read GPS data
|
||||||
|
// Time,X,Y,Z
|
||||||
|
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
|
||||||
|
printf("-- Reading GPS measurements from file\n");
|
||||||
|
{
|
||||||
|
ifstream gps_data(gps_data_file.c_str());
|
||||||
|
getline(gps_data, line, '\n'); // ignore the first line
|
||||||
|
|
||||||
|
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
|
||||||
|
while (!gps_data.eof()) {
|
||||||
|
getline(gps_data, line, '\n');
|
||||||
|
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
|
||||||
|
|
||||||
|
GpsMeasurement measurement;
|
||||||
|
measurement.time = time;
|
||||||
|
measurement.position = Vector3(gps_x, gps_y, gps_z);
|
||||||
|
gps_measurements.push_back(measurement);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char* argv[]) {
|
||||||
|
KittiCalibration kitti_calibration;
|
||||||
|
vector<ImuMeasurement> imu_measurements;
|
||||||
|
vector<GpsMeasurement> gps_measurements;
|
||||||
|
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||||
|
|
||||||
|
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||||
|
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||||
|
.finished();
|
||||||
|
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||||
|
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||||
|
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
|
||||||
|
exit(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Configure different variables
|
||||||
|
// double t_offset = gps_measurements[0].time;
|
||||||
|
size_t first_gps_pose = 1;
|
||||||
|
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||||
|
double g = 9.8;
|
||||||
|
auto w_coriolis = Vector3::Zero(); // zero vector
|
||||||
|
|
||||||
|
// Configure noise models
|
||||||
|
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
|
Vector3::Constant(1.0/0.07))
|
||||||
|
.finished());
|
||||||
|
|
||||||
|
// Set initial conditions for the estimated trajectory
|
||||||
|
// initial pose is the reference frame (navigation frame)
|
||||||
|
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||||
|
// the vehicle is stationary at the beginning at position 0,0,0
|
||||||
|
Vector3 current_velocity_global = Vector3::Zero();
|
||||||
|
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||||
|
|
||||||
|
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
|
||||||
|
Vector3::Constant(1.0))
|
||||||
|
.finished());
|
||||||
|
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||||
|
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
|
||||||
|
Vector3::Constant(5.00e-05))
|
||||||
|
.finished());
|
||||||
|
|
||||||
|
// Set IMU preintegration parameters
|
||||||
|
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||||
|
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||||
|
// error committed in integrating position from velocities
|
||||||
|
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||||
|
|
||||||
|
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||||
|
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||||
|
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||||
|
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||||
|
imu_params->omegaCoriolis = w_coriolis;
|
||||||
|
|
||||||
|
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
|
||||||
|
|
||||||
|
// Set ISAM2 parameters and create ISAM2 solver object
|
||||||
|
ISAM2Params isam_params;
|
||||||
|
isam_params.factorization = ISAM2Params::CHOLESKY;
|
||||||
|
isam_params.relinearizeSkip = 10;
|
||||||
|
|
||||||
|
ISAM2 isam(isam_params);
|
||||||
|
|
||||||
|
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
|
||||||
|
NonlinearFactorGraph new_factors;
|
||||||
|
Values new_values; // values storing the initial estimates of new nodes in the factor graph
|
||||||
|
|
||||||
|
/// Main loop:
|
||||||
|
/// (1) we read the measurements
|
||||||
|
/// (2) we create the corresponding factors in the graph
|
||||||
|
/// (3) we solve the graph to obtain and optimal estimate of robot trajectory
|
||||||
|
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
|
||||||
|
size_t j = 0;
|
||||||
|
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||||
|
// At each non=IMU measurement we initialize a new node in the graph
|
||||||
|
auto current_pose_key = X(i);
|
||||||
|
auto current_vel_key = V(i);
|
||||||
|
auto current_bias_key = B(i);
|
||||||
|
double t = gps_measurements[i].time;
|
||||||
|
|
||||||
|
if (i == first_gps_pose) {
|
||||||
|
// Create initial estimate and prior on initial pose, velocity, and biases
|
||||||
|
new_values.insert(current_pose_key, current_pose_global);
|
||||||
|
new_values.insert(current_vel_key, current_velocity_global);
|
||||||
|
new_values.insert(current_bias_key, current_bias);
|
||||||
|
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
|
||||||
|
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
|
||||||
|
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
|
||||||
|
} else {
|
||||||
|
double t_previous = gps_measurements[i-1].time;
|
||||||
|
|
||||||
|
// Summarize IMU data between the previous GPS measurement and now
|
||||||
|
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
|
||||||
|
static size_t included_imu_measurement_count = 0;
|
||||||
|
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||||
|
if (imu_measurements[j].time >= t_previous) {
|
||||||
|
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
|
||||||
|
imu_measurements[j].gyroscope,
|
||||||
|
imu_measurements[j].dt);
|
||||||
|
included_imu_measurement_count++;
|
||||||
|
}
|
||||||
|
j++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create IMU factor
|
||||||
|
auto previous_pose_key = X(i-1);
|
||||||
|
auto previous_vel_key = V(i-1);
|
||||||
|
auto previous_bias_key = B(i-1);
|
||||||
|
|
||||||
|
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
|
||||||
|
current_pose_key, current_vel_key,
|
||||||
|
previous_bias_key, *current_summarized_measurement);
|
||||||
|
|
||||||
|
// Bias evolution as given in the IMU metadata
|
||||||
|
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
|
||||||
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||||
|
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||||
|
.finished());
|
||||||
|
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
|
||||||
|
current_bias_key,
|
||||||
|
imuBias::ConstantBias(),
|
||||||
|
sigma_between_b);
|
||||||
|
|
||||||
|
// Create GPS factor
|
||||||
|
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||||
|
if ((i % gps_skip) == 0) {
|
||||||
|
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
|
||||||
|
new_values.insert(current_pose_key, gps_pose);
|
||||||
|
|
||||||
|
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
|
||||||
|
gps_pose.translation().print();
|
||||||
|
printf("\n\n");
|
||||||
|
} else {
|
||||||
|
new_values.insert(current_pose_key, current_pose_global);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add initial values for velocity and bias based on the previous estimates
|
||||||
|
new_values.insert(current_vel_key, current_velocity_global);
|
||||||
|
new_values.insert(current_bias_key, current_bias);
|
||||||
|
|
||||||
|
// Update solver
|
||||||
|
// =======================================================================
|
||||||
|
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||||
|
// first so that the heading becomes observable.
|
||||||
|
if (i > (first_gps_pose + 2*gps_skip)) {
|
||||||
|
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
|
||||||
|
new_factors.print();
|
||||||
|
|
||||||
|
isam.update(new_factors, new_values);
|
||||||
|
|
||||||
|
// Reset the newFactors and newValues list
|
||||||
|
new_factors.resize(0);
|
||||||
|
new_values.clear();
|
||||||
|
|
||||||
|
// Extract the result/current estimates
|
||||||
|
Values result = isam.calculateEstimate();
|
||||||
|
|
||||||
|
current_pose_global = result.at<Pose3>(current_pose_key);
|
||||||
|
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||||
|
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||||
|
|
||||||
|
printf("\n################ POSE AT TIME %lf ################\n", t);
|
||||||
|
current_pose_global.print();
|
||||||
|
printf("\n\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save results to file
|
||||||
|
printf("\nWriting results to file...\n");
|
||||||
|
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||||
|
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||||
|
|
||||||
|
Values result = isam.calculateEstimate();
|
||||||
|
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||||
|
auto pose_key = X(i);
|
||||||
|
auto vel_key = V(i);
|
||||||
|
auto bias_key = B(i);
|
||||||
|
|
||||||
|
auto pose = result.at<Pose3>(pose_key);
|
||||||
|
auto velocity = result.at<Vector3>(vel_key);
|
||||||
|
auto bias = result.at<imuBias::ConstantBias>(bias_key);
|
||||||
|
|
||||||
|
auto pose_quat = pose.rotation().toQuaternion();
|
||||||
|
auto gps = gps_measurements[i].position;
|
||||||
|
|
||||||
|
cout << "State at #" << i << endl;
|
||||||
|
cout << "Pose:" << endl << pose << endl;
|
||||||
|
cout << "Velocity:" << endl << velocity << endl;
|
||||||
|
cout << "Bias:" << endl << bias << endl;
|
||||||
|
|
||||||
|
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||||
|
gps_measurements[i].time,
|
||||||
|
pose.x(), pose.y(), pose.z(),
|
||||||
|
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||||
|
gps(0), gps(1), gps(2));
|
||||||
|
}
|
||||||
|
|
||||||
|
fclose(fp_out);
|
||||||
|
}
|
|
@ -35,22 +35,28 @@
|
||||||
* optional arguments:
|
* optional arguments:
|
||||||
* data_csv_path path to the CSV file with the IMU data.
|
* data_csv_path path to the CSV file with the IMU data.
|
||||||
* -c use CombinedImuFactor
|
* -c use CombinedImuFactor
|
||||||
|
* Note: Define USE_LM to use Levenberg Marquardt Optimizer
|
||||||
|
* By default ISAM2 is used
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// GTSAM related includes.
|
// GTSAM related includes.
|
||||||
#include <gtsam/inference/Symbol.h>
|
|
||||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||||
#include <gtsam/navigation/GPSFactor.h>
|
#include <gtsam/navigation/GPSFactor.h>
|
||||||
#include <gtsam/navigation/ImuFactor.h>
|
#include <gtsam/navigation/ImuFactor.h>
|
||||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
|
||||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
|
||||||
#include <gtsam/slam/BetweenFactor.h>
|
#include <gtsam/slam/BetweenFactor.h>
|
||||||
#include <gtsam/slam/dataset.h>
|
#include <gtsam/slam/dataset.h>
|
||||||
|
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||||
|
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||||
|
#include <gtsam/nonlinear/ISAM2.h>
|
||||||
|
#include <gtsam/inference/Symbol.h>
|
||||||
|
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
|
// Uncomment the following to use Levenberg Marquardt Optimizer
|
||||||
|
// #define USE_LM
|
||||||
|
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
@ -64,6 +70,17 @@ static const char use_combined_imu_flag[3] = "-c";
|
||||||
int main(int argc, char* argv[]) {
|
int main(int argc, char* argv[]) {
|
||||||
string data_filename;
|
string data_filename;
|
||||||
bool use_combined_imu = false;
|
bool use_combined_imu = false;
|
||||||
|
|
||||||
|
#ifndef USE_LM
|
||||||
|
printf("Using ISAM2\n");
|
||||||
|
ISAM2Params parameters;
|
||||||
|
parameters.relinearizeThreshold = 0.01;
|
||||||
|
parameters.relinearizeSkip = 1;
|
||||||
|
ISAM2 isam2(parameters);
|
||||||
|
#else
|
||||||
|
printf("Using Levenberg Marquardt Optimizer\n");
|
||||||
|
#endif
|
||||||
|
|
||||||
if (argc < 2) {
|
if (argc < 2) {
|
||||||
printf("using default CSV file\n");
|
printf("using default CSV file\n");
|
||||||
data_filename = findExampleDataFile("imuAndGPSdata.csv");
|
data_filename = findExampleDataFile("imuAndGPSdata.csv");
|
||||||
|
@ -252,9 +269,19 @@ int main(int argc, char* argv[]) {
|
||||||
initial_values.insert(V(correction_count), prop_state.v());
|
initial_values.insert(V(correction_count), prop_state.v());
|
||||||
initial_values.insert(B(correction_count), prev_bias);
|
initial_values.insert(B(correction_count), prev_bias);
|
||||||
|
|
||||||
|
Values result;
|
||||||
|
#ifdef USE_LM
|
||||||
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
|
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
|
||||||
Values result = optimizer.optimize();
|
result = optimizer.optimize();
|
||||||
|
#else
|
||||||
|
isam2.update(*graph, initial_values);
|
||||||
|
isam2.update();
|
||||||
|
result = isam2.calculateEstimate();
|
||||||
|
|
||||||
|
// reset the graph
|
||||||
|
graph->resize(0);
|
||||||
|
initial_values.clear();
|
||||||
|
#endif
|
||||||
// Overwrite the beginning of the preintegration for the next step.
|
// Overwrite the beginning of the preintegration for the next step.
|
||||||
prev_state = NavState(result.at<Pose3>(X(correction_count)),
|
prev_state = NavState(result.at<Pose3>(X(correction_count)),
|
||||||
result.at<Vector3>(V(correction_count)));
|
result.at<Vector3>(V(correction_count)));
|
||||||
|
|
|
@ -109,7 +109,7 @@ int main(int argc, char* argv[]) {
         Symbol('x', i), corrupted_pose);
   }
   for (size_t j = 0; j < points.size(); ++j) {
-    auto corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
+    Point3 corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15);
     initialEstimate.insert<Point3>(Symbol('l', j), corrupted_point);
   }
   initialEstimate.print("Initial Estimates:\n");

@ -10,7 +10,7 @@
  * -------------------------------------------------------------------------- */

 /**
- * @file small.cpp
+ * @file UGM_chain.cpp
  * @brief UGM (undirected graphical model) examples: chain
  * @author Frank Dellaert
  * @author Abhijit Kundu
@ -19,10 +19,9 @@
  * for more explanation. This code demos the same example using GTSAM.
  */

-#include <gtsam/discrete/DiscreteFactorGraph.h>
-#include <gtsam/discrete/DiscreteSequentialSolver.h>
-#include <gtsam/discrete/DiscreteMarginals.h>
 #include <gtsam/base/timing.h>
+#include <gtsam/discrete/DiscreteFactorGraph.h>
+#include <gtsam/discrete/DiscreteMarginals.h>

 #include <iomanip>

@ -30,9 +29,8 @@ using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) {
|
||||||
|
// Set Number of Nodes in the Graph
|
||||||
// Set Number of Nodes in the Graph
|
const int nrNodes = 60;
|
||||||
const int nrNodes = 60;
|
|
||||||
|
|
||||||
// Each node takes 1 of 7 possible states denoted by 0-6 in following order:
|
// Each node takes 1 of 7 possible states denoted by 0-6 in following order:
|
||||||
// ["VideoGames" "Industry" "GradSchool" "VideoGames(with PhD)"
|
// ["VideoGames" "Industry" "GradSchool" "VideoGames(with PhD)"
|
||||||
|
@ -40,70 +38,55 @@ int main(int argc, char** argv) {
|
||||||
const size_t nrStates = 7;
|
const size_t nrStates = 7;
|
||||||
|
|
||||||
// define variables
|
// define variables
|
||||||
vector<DiscreteKey> nodes;
|
vector<DiscreteKey> nodes;
|
||||||
for (int i = 0; i < nrNodes; i++){
|
for (int i = 0; i < nrNodes; i++) {
|
||||||
DiscreteKey dk(i, nrStates);
|
DiscreteKey dk(i, nrStates);
|
||||||
nodes.push_back(dk);
|
nodes.push_back(dk);
|
||||||
}
|
}
|
||||||
|
|
||||||
// create graph
|
// create graph
|
||||||
DiscreteFactorGraph graph;
|
DiscreteFactorGraph graph;
|
||||||
|
|
||||||
// add node potentials
|
// add node potentials
|
||||||
graph.add(nodes[0], ".3 .6 .1 0 0 0 0");
|
graph.add(nodes[0], ".3 .6 .1 0 0 0 0");
|
||||||
for (int i = 1; i < nrNodes; i++)
|
for (int i = 1; i < nrNodes; i++) graph.add(nodes[i], "1 1 1 1 1 1 1");
|
||||||
graph.add(nodes[i], "1 1 1 1 1 1 1");
|
|
||||||
|
|
||||||
const std::string edgePotential = ".08 .9 .01 0 0 0 .01 "
|
const std::string edgePotential =
|
||||||
".03 .95 .01 0 0 0 .01 "
|
".08 .9 .01 0 0 0 .01 "
|
||||||
".06 .06 .75 .05 .05 .02 .01 "
|
".03 .95 .01 0 0 0 .01 "
|
||||||
"0 0 0 .3 .6 .09 .01 "
|
".06 .06 .75 .05 .05 .02 .01 "
|
||||||
"0 0 0 .02 .95 .02 .01 "
|
"0 0 0 .3 .6 .09 .01 "
|
||||||
"0 0 0 .01 .01 .97 .01 "
|
"0 0 0 .02 .95 .02 .01 "
|
||||||
"0 0 0 0 0 0 1";
|
"0 0 0 .01 .01 .97 .01 "
|
||||||
|
"0 0 0 0 0 0 1";
|
||||||
|
|
||||||
// add edge potentials
|
// add edge potentials
|
||||||
for (int i = 0; i < nrNodes - 1; i++)
|
for (int i = 0; i < nrNodes - 1; i++)
|
||||||
graph.add(nodes[i] & nodes[i + 1], edgePotential);
|
graph.add(nodes[i] & nodes[i + 1], edgePotential);
|
||||||
|
|
||||||
cout << "Created Factor Graph with " << nrNodes << " variable nodes and "
|
cout << "Created Factor Graph with " << nrNodes << " variable nodes and "
|
||||||
<< graph.size() << " factors (Unary+Edge).";
|
<< graph.size() << " factors (Unary+Edge).";
|
||||||
|
|
||||||
// "Decoding", i.e., configuration with largest value
|
// "Decoding", i.e., configuration with largest value
|
||||||
// We use sequential variable elimination
|
// We use sequential variable elimination
|
||||||
DiscreteSequentialSolver solver(graph);
|
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
|
||||||
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
|
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
|
||||||
|
|
||||||
// "Inference" Computing marginals for each node
|
// "Inference" Computing marginals for each node
|
||||||
|
|
||||||
|
|
||||||
cout << "\nComputing Node Marginals ..(Sequential Elimination)" << endl;
|
|
||||||
gttic_(Sequential);
|
|
||||||
for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
|
|
||||||
++itr) {
|
|
||||||
//Compute the marginal
|
|
||||||
Vector margProbs = solver.marginalProbabilities(*itr);
|
|
||||||
|
|
||||||
//Print the marginals
|
|
||||||
cout << "Node#" << setw(4) << itr->first << " : ";
|
|
||||||
print(margProbs);
|
|
||||||
}
|
|
||||||
gttoc_(Sequential);
|
|
||||||
|
|
||||||
// Here we'll make use of DiscreteMarginals class, which makes use of
|
// Here we'll make use of DiscreteMarginals class, which makes use of
|
||||||
// bayes-tree based shortcut evaluation of marginals
|
// bayes-tree based shortcut evaluation of marginals
|
||||||
DiscreteMarginals marginals(graph);
|
DiscreteMarginals marginals(graph);
|
||||||
|
|
||||||
cout << "\nComputing Node Marginals ..(BayesTree based)" << endl;
|
cout << "\nComputing Node Marginals ..(BayesTree based)" << endl;
|
||||||
gttic_(Multifrontal);
|
gttic_(Multifrontal);
|
||||||
for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
|
for (vector<DiscreteKey>::iterator it = nodes.begin(); it != nodes.end();
|
||||||
++itr) {
|
++it) {
|
||||||
//Compute the marginal
|
// Compute the marginal
|
||||||
Vector margProbs = marginals.marginalProbabilities(*itr);
|
Vector margProbs = marginals.marginalProbabilities(*it);
|
||||||
|
|
||||||
//Print the marginals
|
// Print the marginals
|
||||||
cout << "Node#" << setw(4) << itr->first << " : ";
|
cout << "Node#" << setw(4) << it->first << " : ";
|
||||||
print(margProbs);
|
print(margProbs);
|
||||||
}
|
}
|
||||||
gttoc_(Multifrontal);
|
gttoc_(Multifrontal);
|
||||||
|
@ -111,4 +94,3 @@ int main(int argc, char** argv) {
|
||||||
tictoc_print_();
|
tictoc_print_();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
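The pattern used by the rewritten example, shown in isolation: eliminate into a DiscreteBayesNet for the MPE, and use DiscreteMarginals for per-node marginals. A condensed sketch on a made-up two-node chain (keys and potentials are illustrative):

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>

using namespace gtsam;

void discreteChainSketch() {
  DiscreteKey A(0, 2), B(1, 2);             // two binary variables
  DiscreteFactorGraph graph;
  graph.add(A, "0.7 0.3");                  // unary potential on A
  graph.add(A & B, "0.9 0.1 0.2 0.8");      // pairwise potential on (A,B)

  // MPE via sequential elimination into a DiscreteBayesNet
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  mpe->print("MPE");

  // Per-variable marginals via the Bayes-tree based DiscreteMarginals
  DiscreteMarginals marginals(graph);
  Vector pA = marginals.marginalProbabilities(A);
  print(pA, "P(A)");
}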
@@ -10,15 +10,16 @@
  * -------------------------------------------------------------------------- */

 /**
- * @file small.cpp
+ * @file UGM_small.cpp
  * @brief UGM (undirected graphical model) examples: small
  * @author Frank Dellaert
  *
  * See http://www.di.ens.fr/~mschmidt/Software/UGM/small.html
  */

+#include <gtsam/base/Vector.h>
 #include <gtsam/discrete/DiscreteFactorGraph.h>
-#include <gtsam/discrete/DiscreteSequentialSolver.h>
+#include <gtsam/discrete/DiscreteMarginals.h>

 using namespace std;
 using namespace gtsam;

@@ -61,24 +62,24 @@ int main(int argc, char** argv) {

   // "Decoding", i.e., configuration with largest value (MPE)
   // We use sequential variable elimination
-  DiscreteSequentialSolver solver(graph);
-  DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
+  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
+  DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
   optimalDecoding->print("\noptimalDecoding");

   // "Inference" Computing marginals
   cout << "\nComputing Node Marginals .." << endl;
-  Vector margProbs;
+  DiscreteMarginals marginals(graph);

-  margProbs = solver.marginalProbabilities(Cathy);
+  Vector margProbs = marginals.marginalProbabilities(Cathy);
   print(margProbs, "Cathy's Node Marginal:");

-  margProbs = solver.marginalProbabilities(Heather);
+  margProbs = marginals.marginalProbabilities(Heather);
   print(margProbs, "Heather's Node Marginal");

-  margProbs = solver.marginalProbabilities(Mark);
+  margProbs = marginals.marginalProbabilities(Mark);
   print(margProbs, "Mark's Node Marginal");

-  margProbs = solver.marginalProbabilities(Allison);
+  margProbs = marginals.marginalProbabilities(Allison);
   print(margProbs, "Allison's Node Marginal");

   return 0;
gtsam.h (67 changes)

@@ -281,7 +281,7 @@ virtual class Value {
 };

 #include <gtsam/base/GenericValue.h>
-template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::StereoPoint2, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
+template<T = {Vector, Matrix, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::StereoPoint2, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
 virtual class GenericValue : gtsam::Value {
   void serializable() const;
 };
@@ -598,6 +598,7 @@ class SOn {
   // Standard Constructors
   SOn(size_t n);
   static gtsam::SOn FromMatrix(Matrix R);
+  static gtsam::SOn Lift(size_t n, Matrix R);

   // Testable
   void print(string s) const;
@@ -1458,7 +1459,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
   void serializable() const;

   double weight(double error) const;
-  double residual(double error) const;
+  double loss(double error) const;
 };

 virtual class Fair: gtsam::noiseModel::mEstimator::Base {

The identical residual() -> loss() rename is applied in the Fair, Huber, Cauchy, Tukey, Welsch, GemanMcClure, DCS and L2WithDeadZone m-estimator classes (hunks at -1469, -1480, -1491, -1502, -1513, -1524, -1535 and -1546), up to the closing }///\namespace mEstimator.
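The rename affects how the robust cost rho(error) is queried from a wrapped m-estimator. A small sketch of a Huber estimator around a Gaussian base model (the 1.345 threshold and the sigma value are illustrative, not taken from the diff):

#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

void robustNoiseSketch() {
  auto base = noiseModel::Isotropic::Sigma(3, 0.1);
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  auto robust = noiseModel::Robust::Create(huber, base);

  // weight() is the IRLS weight; loss() (formerly residual()) is rho(error).
  double w = huber->weight(2.0);
  double rho = huber->loss(2.0);
  (void)w; (void)rho; (void)robust;
}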
@@ -1937,6 +1938,22 @@ virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   void print() const;
 };

+#include <gtsam/linear/Preconditioner.h>
+virtual class PreconditionerParameters {
+  PreconditionerParameters();
+};
+
+virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters {
+  DummyPreconditionerParameters();
+};
+
+#include <gtsam/linear/PCGSolver.h>
+virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
+  PCGSolverParameters();
+  void print(string s);
+  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+};
+
 #include <gtsam/linear/SubgraphSolver.h>
 virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
   SubgraphSolverParameters();
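A rough C++ sketch of wiring the newly wrapped PCG configuration into an optimizer. This assumes the C++ side mirrors the wrapped setPreconditionerParams() name and that NonlinearOptimizerParams accepts iterative linear-solver parameters; treat it as an outline, not the canonical usage.

#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>

using namespace gtsam;

LevenbergMarquardtParams pcgParamsSketch() {
  auto pcg = boost::make_shared<PCGSolverParameters>();
  // Trivial preconditioner; real problems would pick something stronger.
  pcg->setPreconditionerParams(boost::make_shared<DummyPreconditionerParameters>());

  LevenbergMarquardtParams params;
  params.linearSolverType = NonlinearOptimizerParams::Iterative;  // use PCG instead of a direct solver
  params.iterativeParams = pcg;
  return params;
}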
@@ -2835,6 +2852,34 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
   KarcherMeanFactor(const gtsam::KeyVector& keys);
 };

+#include <gtsam/slam/FrobeniusFactor.h>
+gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
+    gtsam::noiseModel::Base* model, size_t d);
+
+template<T = {gtsam::SO3, gtsam::SO4}>
+virtual class FrobeniusFactor : gtsam::NoiseModelFactor {
+  FrobeniusFactor(size_t key1, size_t key2);
+  FrobeniusFactor(size_t key1, size_t key2, gtsam::noiseModel::Base* model);
+
+  Vector evaluateError(const T& R1, const T& R2);
+};
+
+template<T = {gtsam::SO3, gtsam::SO4}>
+virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
+  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12);
+  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12, gtsam::noiseModel::Base* model);
+
+  Vector evaluateError(const T& R1, const T& R2);
+};
+
+virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
+  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
+                          size_t p);
+  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
+                          size_t p, gtsam::noiseModel::Base* model);
+  Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2);
+};
+
 //*************************************************************************
 // Navigation
 //*************************************************************************
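As a rough illustration of the Frobenius factors exposed above, on the C++ side (the keys and the relative rotation are invented for the sketch; the constructor signatures follow the wrapper entries listed in the hunk):

#include <gtsam/slam/FrobeniusFactor.h>
#include <gtsam/geometry/SO3.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

void frobeniusSketch() {
  NonlinearFactorGraph graph;
  SO3 R12 = SO3::AxisAngle(Vector3(0, 0, 1), 0.1);  // relative rotation "measurement"
  // Penalizes the Frobenius norm of (R1 * R12 - R2) between SO(3) variables 1 and 2.
  graph.emplace_shared<FrobeniusBetweenFactor<SO3>>(1, 2, R12);
}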
@@ -2955,6 +3000,7 @@ class PreintegratedImuMeasurements {
   gtsam::Rot3 deltaRij() const;
   Vector deltaPij() const;
   Vector deltaVij() const;
+  gtsam::imuBias::ConstantBias biasHat() const;
   Vector biasHatVector() const;
   gtsam::NavState predict(const gtsam::NavState& state_i,
                           const gtsam::imuBias::ConstantBias& bias) const;

@@ -3016,6 +3062,7 @@ class PreintegratedCombinedMeasurements {
   gtsam::Rot3 deltaRij() const;
   Vector deltaPij() const;
   Vector deltaVij() const;
+  gtsam::imuBias::ConstantBias biasHat() const;
   Vector biasHatVector() const;
   gtsam::NavState predict(const gtsam::NavState& state_i,
                           const gtsam::imuBias::ConstantBias& bias) const;
@@ -17,7 +17,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)
   endforeach(eigen_dir)

   if(GTSAM_WITH_EIGEN_UNSUPPORTED)
-    message("-- Installing Eigen Unsupported modules")
+    message(STATUS "Installing Eigen Unsupported modules")
     # do the same for the unsupported eigen folder
     file(GLOB_RECURSE unsupported_eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/unsupported/Eigen/*.h")
@@ -15,7 +15,7 @@ set (gtsam_subdirs
     sfm
     slam
     smart
     navigation
 )

 set(gtsam_srcs)

@@ -186,11 +186,17 @@ install(
 list(APPEND GTSAM_EXPORTED_TARGETS gtsam)
 set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)

-# make sure that ccolamd compiles even in face of warnings
+# Make sure that ccolamd compiles even in face of warnings
+# and suppress all warnings from 3rd party code if Release build
 if(WIN32)
-    set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
+    set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "/w")
 else()
-    set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
+    if("${CMAKE_BUILD_TYPE}" STREQUAL "Release")
+        # Suppress all warnings from 3rd party sources.
+        set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w")
+    else()
+        set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error")
+    endif()
 endif()

 # Create the matlab toolbox for the gtsam library
@@ -181,7 +181,7 @@ public:
   // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
   enum { NeedsToAlign = (sizeof(T) % 16) == 0 };
 public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+  GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
 };

 /// use this macro instead of BOOST_CLASS_EXPORT for GenericValues

@@ -214,7 +214,7 @@ public:
   enum { NeedsToAlign = (sizeof(M1) % 16) == 0 || (sizeof(M2) % 16) == 0
   };
 public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+  GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
 };

 // Define any direct product group to be a model of the multiplicative Group concept
@@ -552,17 +552,47 @@ GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
 namespace boost {
   namespace serialization {

+    /**
+     * Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
+     *
+     * Eigen supports calling resize() on both static and dynamic matrices.
+     * This allows for a uniform API, with resize having no effect if the static matrix
+     * is already the correct size.
+     * https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
+     *
+     * We use all the Matrix template parameters to ensure wide compatibility.
+     *
+     * eigen_typekit in ROS uses the same code
+     * http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
+     */
+
     // split version - sends sizes ahead
-    template<class Archive>
-    void save(Archive & ar, const gtsam::Matrix & m, unsigned int /*version*/) {
+    template<class Archive,
+             typename Scalar_, int Rows_, int Cols_, int Ops_, int MaxRows_, int MaxCols_>
+    void save(Archive & ar,
+              const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+              const unsigned int /*version*/) {
       const size_t rows = m.rows(), cols = m.cols();
       ar << BOOST_SERIALIZATION_NVP(rows);
       ar << BOOST_SERIALIZATION_NVP(cols);
       ar << make_nvp("data", make_array(m.data(), m.size()));
     }

-    template<class Archive>
-    void load(Archive & ar, gtsam::Matrix & m, unsigned int /*version*/) {
+    template<class Archive,
+             typename Scalar_, int Rows_, int Cols_, int Ops_, int MaxRows_, int MaxCols_>
+    void load(Archive & ar,
+              Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+              const unsigned int /*version*/) {
       size_t rows, cols;
       ar >> BOOST_SERIALIZATION_NVP(rows);
       ar >> BOOST_SERIALIZATION_NVP(cols);

@@ -570,8 +600,25 @@ namespace boost {
       ar >> make_nvp("data", make_array(m.data(), m.size()));
     }

+    // templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
+    template<class Archive,
+             typename Scalar_, int Rows_, int Cols_, int Ops_, int MaxRows_, int MaxCols_>
+    void serialize(Archive & ar,
+                   Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
+                   const unsigned int version) {
+      split_free(ar, m, version);
+    }
+
+    // specialized to Matrix for MATLAB wrapper
+    template <class Archive>
+    void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
+      split_free(ar, m, version);
+    }
+
   } // namespace serialization
 } // namespace boost

-BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Matrix);
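With the templated save/load above, fixed-size Eigen matrices serialize through the same boost archives as the dynamic gtsam::Matrix. A minimal round-trip sketch using the string helpers from gtsam/base/serialization.h:

#include <gtsam/base/Matrix.h>
#include <gtsam/base/serialization.h>

#include <cassert>
#include <string>

void matrixSerializationSketch() {
  gtsam::Matrix3 m = gtsam::Matrix3::Identity();   // fixed-size 3x3 matrix
  std::string text = gtsam::serialize(m);          // boost text archive to string

  gtsam::Matrix3 restored = gtsam::Matrix3::Zero();
  gtsam::deserialize(text, restored);
  assert(restored.isApprox(m));
}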
@@ -76,7 +76,7 @@ namespace gtsam {
     blockStart_(0)
   {
     fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension);
-    matrix_.setZero(variableColOffsets_.back(), variableColOffsets_.back());
+    matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back());
     assertInvariants();
   }

@@ -86,7 +86,7 @@ namespace gtsam {
     blockStart_(0)
   {
     fillOffsets(firstBlockDim, lastBlockDim, appendOneDimension);
-    matrix_.setZero(variableColOffsets_.back(), variableColOffsets_.back());
+    matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back());
     assertInvariants();
   }

@@ -95,7 +95,7 @@ namespace gtsam {
   SymmetricBlockMatrix(const CONTAINER& dimensions, const Matrix& matrix, bool appendOneDimension = false) :
     blockStart_(0)
   {
-    matrix_.setZero(matrix.rows(), matrix.cols());
+    matrix_.resize(matrix.rows(), matrix.cols());
     matrix_.triangularView<Eigen::Upper>() = matrix.triangularView<Eigen::Upper>();
     fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension);
     if(matrix_.rows() != matrix_.cols())

@@ -416,4 +416,3 @@ namespace gtsam {
   class CholeskyFailed;

 }
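The switch from setZero(rows, cols) to resize(rows, cols) avoids a redundant zero-fill when the storage is about to be overwritten anyway. The difference in plain Eigen terms, as a small sketch:

#include <Eigen/Core>

void resizeVsSetZeroSketch() {
  Eigen::MatrixXd a, b;
  a.setZero(3, 3);   // allocates and fills every coefficient with zero
  b.resize(3, 3);    // allocates only; coefficients are left uninitialized
  b.setZero();       // the caller zero-fills later, only if actually needed
}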
@@ -0,0 +1,67 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2020, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file make_shared.h
+ * @brief make_shared trampoline function to ensure proper alignment
+ * @author Fan Jiang
+ */
+
+#pragma once
+
+#include <gtsam/base/types.h>
+
+#include <Eigen/Core>
+
+#include <boost/make_shared.hpp>
+
+#include <type_traits>
+
+namespace gtsam {
+  /// An shorthand alias for accessing the ::type inside std::enable_if that can be used in a template directly
+  template<bool B, class T = void>
+  using enable_if_t = typename std::enable_if<B, T>::type;
+}
+
+namespace gtsam {
+
+  /**
+   * Add our own `make_shared` as a layer of wrapping on `boost::make_shared`
+   * This solves the problem with the stock `make_shared` that custom alignment is not respected, causing SEGFAULTs
+   * at runtime, which is notoriously hard to debug.
+   *
+   * Explanation
+   * ===============
+   * The template `needs_eigen_aligned_allocator<T>::value` will evaluate to `std::true_type` if the type alias
+   * `_eigen_aligned_allocator_trait = void` is present in a class, which is automatically added by the
+   * `GTSAM_MAKE_ALIGNED_OPERATOR_NEW` macro.
+   *
+   * This function declaration will only be taken when the above condition is true, so if some object does not need to
+   * be aligned, `gtsam::make_shared` will fall back to the next definition, which is a simple wrapper for
+   * `boost::make_shared`.
+   *
+   * @tparam T The type of object being constructed
+   * @tparam Args Type of the arguments of the constructor
+   * @param args Arguments of the constructor
+   * @return The object created as a boost::shared_ptr<T>
+   */
+  template<typename T, typename ... Args>
+  gtsam::enable_if_t<needs_eigen_aligned_allocator<T>::value, boost::shared_ptr<T>> make_shared(Args &&... args) {
+    return boost::allocate_shared<T>(Eigen::aligned_allocator<T>(), std::forward<Args>(args)...);
+  }
+
+  /// Fall back to the boost version if no need for alignment
+  template<typename T, typename ... Args>
+  gtsam::enable_if_t<!needs_eigen_aligned_allocator<T>::value, boost::shared_ptr<T>> make_shared(Args &&... args) {
+    return boost::make_shared<T>(std::forward<Args>(args)...);
+  }
+
+}
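A short usage sketch of the new trampoline (the AlignedExample type is hypothetical): classes that opt in via GTSAM_MAKE_ALIGNED_OPERATOR_NEW receive an aligned allocation through gtsam::make_shared; everything else falls through to boost::make_shared.

#include <gtsam/base/make_shared.h>
#include <gtsam/base/types.h>

#include <Eigen/Core>

struct AlignedExample {              // hypothetical user type
  Eigen::Matrix4d M;                 // fixed-size member that wants aligned storage
  GTSAM_MAKE_ALIGNED_OPERATOR_NEW    // also injects the _eigen_aligned_allocator_trait tag
};

void makeSharedSketch() {
  // Uses boost::allocate_shared with Eigen::aligned_allocator under the hood.
  boost::shared_ptr<AlignedExample> p = gtsam::make_shared<AlignedExample>();
  (void)p;
}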
@@ -42,123 +42,218 @@
 namespace gtsam {

This hunk reorganizes the serialization helpers into three doxygen groups (Standard, XML, Binary). Each format gains stream-based primitives, and the pre-existing string and file entry points keep their signatures but become thin wrappers around those primitives. The new standard-format primitives:

+/** @name Standard serialization
+ * Serialization in default compressed format
+ */
+///@{
+/// serializes to a stream
+template <class T>
+void serializeToStream(const T& input, std::ostream& out_archive_stream) {
+  boost::archive::text_oarchive out_archive(out_archive_stream);
+  out_archive << input;
+}
+
+/// deserializes from a stream
+template <class T>
+void deserializeFromStream(std::istream& in_archive_stream, T& output) {
+  boost::archive::text_iarchive in_archive(in_archive_stream);
+  in_archive >> output;
+}
+
+/// serializes to a string
+template <class T>
+std::string serializeToString(const T& input) {
+  std::ostringstream out_archive_stream;
+  serializeToStream(input, out_archive_stream);
+  return out_archive_stream.str();
+}
+
+/// deserializes from a string
+template <class T>
+void deserializeFromString(const std::string& serialized, T& output) {
+  std::istringstream in_archive_stream(serialized);
+  deserializeFromStream(in_archive_stream, output);
+}

The existing file helpers now delegate to these, e.g.:

 template <class T>
 bool serializeToFile(const T& input, const std::string& filename) {
   std::ofstream out_archive_stream(filename.c_str());
-  if (!out_archive_stream.is_open())
-    return false;
-  boost::archive::text_oarchive out_archive(out_archive_stream);
-  out_archive << input;
+  if (!out_archive_stream.is_open()) return false;
+  serializeToStream(input, out_archive_stream);
   out_archive_stream.close();
   return true;
 }

and serialize()/deserialize() reduce to calls to serializeToString()/deserializeFromString(). The XML group follows the same pattern with serializeToXMLStream, deserializeFromXMLStream, serializeToXMLString, deserializeFromXMLString, serializeToXMLFile and deserializeFromXMLFile, keeping serializeXML/deserializeXML as string aliases; the Binary group does likewise with serializeToBinaryStream, deserializeFromBinaryStream, serializeToBinaryString, deserializeFromBinaryString, serializeToBinaryFile and deserializeFromBinaryFile, keeping serializeBinary/deserializeBinary as string aliases. Each group is closed with ///@}, and the file ends with } // namespace gtsam.
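A brief sketch of the file-based XML entry points retained above. The file name, NVP label, and the use of Pose3 are illustrative; any boost-serializable GTSAM type works the same way.

#include <gtsam/base/serialization.h>
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

bool xmlRoundTripSketch() {
  Pose3 pose(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));
  if (!serializeToXMLFile(pose, "pose.xml", "pose")) return false;

  Pose3 restored;
  if (!deserializeFromXMLFile("pose.xml", restored, "pose")) return false;
  return restored.equals(pose);  // true if the round trip preserved the pose
}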
@@ -26,6 +26,7 @@
 #include <gtsam/base/serialization.h>

 #include <boost/serialization/serialization.hpp>
+#include <boost/filesystem.hpp>


 // whether to print the serialized text to stdout

@@ -40,22 +41,37 @@ T create() {
   return T();
 }

+// Creates or empties a folder in the build folder and returns the relative path
+boost::filesystem::path resetFilesystem(
+    boost::filesystem::path folder = "actual") {
+  boost::filesystem::remove_all(folder);
+  boost::filesystem::create_directory(folder);
+  return folder;
+}
+
 // Templated round-trip serialization
 template<class T>
 void roundtrip(const T& input, T& output) {
-  // Serialize
   std::string serialized = serialize(input);
   if (verbose) std::cout << serialized << std::endl << std::endl;

   deserialize(serialized, output);
 }

-// This version requires equality operator
+// Templated round-trip serialization using a file
+template<class T>
+void roundtripFile(const T& input, T& output) {
+  boost::filesystem::path path = resetFilesystem()/"graph.dat";
+  serializeToFile(input, path.string());
+  deserializeFromFile(path.string(), output);
+}
+
+// This version requires equality operator and uses string and file round-trips
 template<class T>
 bool equality(const T& input = T()) {
-  T output = create<T>();
+  T output = create<T>(), outputf = create<T>();
   roundtrip<T>(input,output);
-  return input==output;
+  roundtripFile<T>(input,outputf);
+  return (input==output) && (input==outputf);
 }

 // This version requires Testable

@@ -77,20 +93,26 @@ bool equalsDereferenced(const T& input) {
 // Templated round-trip serialization using XML
 template<class T>
 void roundtripXML(const T& input, T& output) {
-  // Serialize
   std::string serialized = serializeXML<T>(input);
   if (verbose) std::cout << serialized << std::endl << std::endl;

-  // De-serialize
   deserializeXML(serialized, output);
 }

+// Templated round-trip serialization using XML File
+template<class T>
+void roundtripXMLFile(const T& input, T& output) {
+  boost::filesystem::path path = resetFilesystem()/"graph.xml";
+  serializeToXMLFile(input, path.string());
+  deserializeFromXMLFile(path.string(), output);
+}
+
 // This version requires equality operator
 template<class T>
 bool equalityXML(const T& input = T()) {
-  T output = create<T>();
+  T output = create<T>(), outputf = create<T>();
   roundtripXML<T>(input,output);
-  return input==output;
+  roundtripXMLFile<T>(input,outputf);
+  return (input==output) && (input==outputf);
 }

 // This version requires Testable

@@ -112,20 +134,26 @@ bool equalsDereferencedXML(const T& input = T()) {
 // Templated round-trip serialization using XML
 template<class T>
 void roundtripBinary(const T& input, T& output) {
-  // Serialize
   std::string serialized = serializeBinary<T>(input);
   if (verbose) std::cout << serialized << std::endl << std::endl;

-  // De-serialize
   deserializeBinary(serialized, output);
 }

+// Templated round-trip serialization using Binary file
+template<class T>
+void roundtripBinaryFile(const T& input, T& output) {
+  boost::filesystem::path path = resetFilesystem()/"graph.bin";
+  serializeToBinaryFile(input, path.string());
+  deserializeFromBinaryFile(path.string(), output);
+}
+
 // This version requires equality operator
 template<class T>
 bool equalityBinary(const T& input = T()) {
-  T output = create<T>();
+  T output = create<T>(), outputf = create<T>();
   roundtripBinary<T>(input,output);
-  return input==output;
+  roundtripBinaryFile<T>(input,outputf);
+  return (input==output) && (input==outputf);
 }

 // This version requires Testable
@@ -28,6 +28,7 @@
 #include <cstdint>

 #include <exception>
+#include <string>

 #ifdef GTSAM_USE_TBB
 #include <tbb/scalable_allocator.h>

@@ -54,7 +55,7 @@
 namespace gtsam {

   /// Function to demangle type name of variable, e.g. demangle(typeid(x).name())
-  std::string demangle(const char* name);
+  std::string GTSAM_EXPORT demangle(const char* name);

   /// Integer nonlinear key type
   typedef std::uint64_t Key;

@@ -230,3 +231,50 @@ namespace std {
 #ifdef ERROR
 #undef ERROR
 #endif

+namespace gtsam {
+
+/// Convenience void_t as we assume C++11, it will not conflict the std one in C++17 as this is in `gtsam::`
+template<typename ...> using void_t = void;
+
+/**
+ * A SFINAE trait to mark classes that need special alignment.
+ *
+ * This is required to make boost::make_shared and etc respect alignment, which is essential for the Python
+ * wrappers to work properly.
+ *
+ * Explanation
+ * =============
+ * When a GTSAM type is not declared with the type alias `_eigen_aligned_allocator_trait = void`, the first template
+ * will be taken so `needs_eigen_aligned_allocator` will be resolved to `std::false_type`.
+ *
+ * Otherwise, it will resolve to the second template, which will be resolved to `std::true_type`.
+ *
+ * Please refer to `gtsam/base/make_shared.h` for an example.
+ */
+template<typename, typename = void_t<>>
+struct needs_eigen_aligned_allocator : std::false_type {
+};
+template<typename T>
+struct needs_eigen_aligned_allocator<T, void_t<typename T::_eigen_aligned_allocator_trait>> : std::true_type {
+};
+
+}
+
+/**
+ * This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned
+ * memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that is hard to debug.
+ * See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation.
+ */
+#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW \
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW \
+  using _eigen_aligned_allocator_trait = void;
+
+/**
+ * This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned
+ * memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that is hard to debug.
+ * See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation.
+ */
+#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+  using _eigen_aligned_allocator_trait = void;
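A compile-time sketch of how the trait above resolves; MyPlain and MyAligned are invented types used only to exercise the two partial specializations.

#include <gtsam/base/types.h>

#include <Eigen/Core>
#include <type_traits>

struct MyPlain {};                   // no trait tag: resolves to std::false_type

struct MyAligned {
  Eigen::Matrix4d M;
  GTSAM_MAKE_ALIGNED_OPERATOR_NEW    // injects _eigen_aligned_allocator_trait
};

static_assert(!gtsam::needs_eigen_aligned_allocator<MyPlain>::value,
              "plain types do not request aligned allocation");
static_assert(gtsam::needs_eigen_aligned_allocator<MyAligned>::value,
              "tagged types request aligned allocation");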
@@ -20,13 +20,14 @@
 #include <vector>
 #include <map>
 #include <boost/shared_ptr.hpp>
+#include <gtsam/inference/BayesNet.h>
 #include <gtsam/inference/FactorGraph.h>
 #include <gtsam/discrete/DiscreteConditional.h>

 namespace gtsam {

   /** A Bayes net made from linear-Discrete densities */
-  class GTSAM_EXPORT DiscreteBayesNet: public FactorGraph<DiscreteConditional>
+  class GTSAM_EXPORT DiscreteBayesNet: public BayesNet<DiscreteConditional>
   {
   public:
@@ -29,13 +29,32 @@ namespace gtsam {
   template class BayesTreeCliqueBase<DiscreteBayesTreeClique, DiscreteFactorGraph>;
   template class BayesTree<DiscreteBayesTreeClique>;

+  /* ************************************************************************* */
+  double DiscreteBayesTreeClique::evaluate(
+      const DiscreteConditional::Values& values) const {
+    // evaluate all conditionals and multiply
+    double result = (*conditional_)(values);
+    for (const auto& child : children) {
+      result *= child->evaluate(values);
+    }
+    return result;
+  }
+
   /* ************************************************************************* */
-  bool DiscreteBayesTree::equals(const This& other, double tol) const
-  {
+  bool DiscreteBayesTree::equals(const This& other, double tol) const {
     return Base::equals(other, tol);
   }

+  /* ************************************************************************* */
+  double DiscreteBayesTree::evaluate(
+      const DiscreteConditional::Values& values) const {
+    double result = 1.0;
+    for (const auto& root : roots_) {
+      result *= root->evaluate(values);
+    }
+    return result;
+  }
+
 } // \namespace gtsam
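A hedged sketch of the new evaluate() entry point: eliminate a small discrete graph multifrontally and query the probability of a full assignment. The keys and potentials are invented for the sketch.

#include <gtsam/discrete/DiscreteBayesTree.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>

using namespace gtsam;

double bayesTreeEvaluateSketch() {
  DiscreteKey A(0, 2), B(1, 2);
  DiscreteFactorGraph graph;
  graph.add(A, "0.6 0.4");
  graph.add(A & B, "0.9 0.1 0.2 0.8");

  DiscreteBayesTree::shared_ptr tree = graph.eliminateMultifrontal();

  DiscreteConditional::Values assignment;   // maps Key -> state index
  assignment[0] = 1;  // A = 1
  assignment[1] = 0;  // B = 0
  return tree->evaluate(assignment);        // product over all clique conditionals
}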
@@ -11,7 +11,8 @@
 /**
  * @file DiscreteBayesTree.h
- * @brief Discrete Bayes Tree, the result of eliminating a DiscreteJunctionTree
+ * @brief Discrete Bayes Tree, the result of eliminating a
+ * DiscreteJunctionTree
  * @brief DiscreteBayesTree
  * @author Frank Dellaert
  * @author Richard Roberts

@@ -22,45 +23,62 @@
 #include <gtsam/discrete/DiscreteBayesNet.h>
 #include <gtsam/discrete/DiscreteFactorGraph.h>
 #include <gtsam/inference/BayesTree.h>
+#include <gtsam/inference/Conditional.h>
 #include <gtsam/inference/BayesTreeCliqueBase.h>

+#include <string>
+
 namespace gtsam {

   // Forward declarations
   class DiscreteConditional;
   class VectorValues;

Both class bodies in this hunk are reflowed to clang-format style; the functional changes are two new members. DiscreteBayesTreeClique gains:

+    /// print index signature only
+    void printSignature(
+        const std::string& s = "Clique: ",
+        const KeyFormatter& formatter = DefaultKeyFormatter) const {
+      conditional_->printSignature(s, formatter);
+    }
+
+    //** evaluate conditional probability of subtree for given Values */
+    double evaluate(const DiscreteConditional::Values& values) const;

and DiscreteBayesTree gains:

+    //** evaluate probability for given Values */
+    double evaluate(const DiscreteConditional::Values& values) const;

with the closing brace updated to }  // namespace gtsam.
@ -27,6 +27,7 @@
#include <algorithm>
#include <random>
#include <stdexcept>
#include <string>
#include <vector>

using namespace std;
@ -61,16 +62,26 @@ DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint,
}

/* ******************************************************************************** */
DiscreteConditional::DiscreteConditional(const Signature& signature)
    : BaseFactor(signature.discreteKeys(), signature.cpt()),
      BaseConditional(1) {}

/* ******************************************************************************** */
void DiscreteConditional::print(const string& s,
                                const KeyFormatter& formatter) const {
  cout << s << " P( ";
  for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) {
    cout << formatter(*it) << " ";
  }
  if (nrParents()) {
    cout << "| ";
    for (const_iterator it = beginParents(); it != endParents(); ++it) {
      cout << formatter(*it) << " ";
    }
  }
  cout << ")";
  Potentials::print("");
  cout << endl;
}

/* ******************************************************************************** */
@ -173,55 +184,28 @@ size_t DiscreteConditional::solve(const Values& parentsValues) const {

/* ******************************************************************************** */
size_t DiscreteConditional::sample(const Values& parentsValues) const {
  static mt19937 rng(2);  // random number generator

  // Get the correct conditional density
  ADT pFS = choose(parentsValues);  // P(F|S=parentsValues)

  // TODO(Duy): only works for one key now, seems horribly slow this way
  assert(nrFrontals() == 1);
  Key key = firstFrontalKey();
  size_t nj = cardinality(key);
  vector<double> p(nj);
  Values frontals;
  for (size_t value = 0; value < nj; value++) {
    frontals[key] = value;
    p[value] = pFS(frontals);  // P(F=value|S=parentsValues)
    if (p[value] == 1.0) {
      return value;  // shortcut exit
    }
  }
  std::discrete_distribution<size_t> distribution(p.begin(), p.end());
  return distribution(rng);
}

/* ******************************************************************************** */

}// namespace
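A standalone sketch (my own, not GTSAM code) of why the rewritten sample() above no longer builds an explicit CDF: std::discrete_distribution accepts unnormalized weights and normalizes them internally, so the per-value probabilities collected in p can be handed to it directly.

#include <iostream>
#include <random>
#include <vector>

int main() {
  std::mt19937 rng(2);
  std::vector<double> p{0.2, 0.5, 0.3};  // weights, normalization not required
  std::discrete_distribution<size_t> distribution(p.begin(), p.end());

  // Draw many samples and print the empirical frequencies.
  std::vector<size_t> counts(p.size(), 0);
  for (int i = 0; i < 10000; ++i) counts[distribution(rng)]++;
  for (size_t v = 0; v < counts.size(); ++v)
    std::cout << v << ": " << counts[v] / 10000.0 << "\n";  // roughly 0.2 0.5 0.3
  return 0;
}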
@ -24,6 +24,8 @@
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>

#include <string>

namespace gtsam {

/**
@ -92,6 +94,13 @@ public:
  /// @name Standard Interface
  /// @{

  /// print index signature only
  void printSignature(
      const std::string& s = "Discrete Conditional: ",
      const KeyFormatter& formatter = DefaultKeyFormatter) const {
    static_cast<const BaseConditional*>(this)->print(s, formatter);
  }

  /// Evaluate, just look up in AlgebraicDecisionTree
  virtual double operator()(const Values& values) const {
    return Potentials::operator()(values);
@ -15,50 +15,52 @@
 * @author Frank Dellaert
 */

#include <gtsam/discrete/DecisionTree-inl.h>
#include <gtsam/discrete/Potentials.h>

#include <boost/format.hpp>

#include <string>

using namespace std;

namespace gtsam {

// explicit instantiation
template class DecisionTree<Key, double>;
template class AlgebraicDecisionTree<Key>;

/* ************************************************************************* */
double Potentials::safe_div(const double& a, const double& b) {
  // cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b));
  // The use for safe_div is when we divide the product factor by the sum
  // factor. If the product or sum is zero, we accord zero probability to the
  // event.
  return (a == 0 || b == 0) ? 0 : (a / b);
}

/* ********************************************************************************
 */
Potentials::Potentials() : ADT(1.0) {}

/* ********************************************************************************
 */
Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree)
    : ADT(decisionTree), cardinalities_(keys.cardinalities()) {}

/* ************************************************************************* */
bool Potentials::equals(const Potentials& other, double tol) const {
  return ADT::equals(other, tol);
}

/* ************************************************************************* */
void Potentials::print(const string& s, const KeyFormatter& formatter) const {
  cout << s << "\n Cardinalities: {";
  for (const DiscreteKey& key : cardinalities_)
    cout << formatter(key.first) << ":" << key.second << ", ";
  cout << "}" << endl;
  ADT::print(" ");
}
//
// /* ************************************************************************* */
// template<class P>
@ -95,4 +97,4 @@ namespace gtsam {

/* ************************************************************************* */

}  // namespace gtsam
@ -122,28 +122,30 @@ namespace gtsam {
      key_(key) {
}

DiscreteKeys Signature::discreteKeys() const {
  DiscreteKeys keys;
  keys.push_back(key_);
  for (const DiscreteKey& key : parents_) keys.push_back(key);
  return keys;
}

KeyVector Signature::indices() const {
  KeyVector js;
  js.push_back(key_.first);
  for (const DiscreteKey& key : parents_) js.push_back(key.first);
  return js;
}

vector<double> Signature::cpt() const {
  vector<double> cpt;
  if (table_) {
    const size_t nrStates = table_->at(0).size();
    for (size_t j = 0; j < nrStates; j++) {
      for (const Row& row : *table_) {
        assert(row.size() == nrStates);
        cpt.push_back(row[j]);
      }
    }
  }
  return cpt;
}
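A small standalone illustration (my own, not part of the diff) of the new flattening order in Signature::cpt(): the outer loop now runs over the frontal-variable states, so for X|Y = "1/1 2/3 1/4", whose normalized rows are {0.5,0.5}, {0.4,0.6}, {0.2,0.8}, the flattened CPT becomes 0.5 0.4 0.2 0.5 0.6 0.8, matching the DecisionTreeFactor(X & Y, ...) strings used in the testDiscreteConditional changes further down.

#include <cassert>
#include <vector>

int main() {
  std::vector<std::vector<double>> table{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}};
  std::vector<double> cpt;
  const size_t nrStates = table[0].size();
  for (size_t j = 0; j < nrStates; j++)  // outer loop over frontal states
    for (const auto& row : table)        // inner loop over parent assignments
      cpt.push_back(row[j]);
  assert((cpt == std::vector<double>{0.5, 0.4, 0.2, 0.5, 0.6, 0.8}));
  return 0;
}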
@ -86,8 +86,8 @@ namespace gtsam {
    return parents_;
  }

  /** All keys, with variable key first */
  DiscreteKeys discreteKeys() const;

  /** All key indices, with variable key first */
  KeyVector indices() const;
@ -132,7 +132,7 @@ TEST(ADT, example3)

/** Convert Signature into CPT */
ADT create(const Signature& signature) {
  ADT p(signature.discreteKeys(), signature.cpt());
  static size_t count = 0;
  const DiscreteKey& key = signature.key();
  string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str();
@ -181,19 +181,20 @@ TEST(ADT, joint)
  dot(joint, "Asia-ASTLBEX");
  joint = apply(joint, pD, &mul);
  dot(joint, "Asia-ASTLBEXD");
  EXPECT_LONGS_EQUAL(346, muls);
  gttoc_(asiaJoint);
  tictoc_getNode(asiaJointNode, asiaJoint);
  elapsed = asiaJointNode->secs() + asiaJointNode->wall();
  tictoc_reset_();
  printCounts("Asia joint");

  // Form P(A,S,T,L) = P(A) P(S) P(T|A) P(L|S)
  ADT pASTL = pA;
  pASTL = apply(pASTL, pS, &mul);
  pASTL = apply(pASTL, pT, &mul);
  pASTL = apply(pASTL, pL, &mul);

  // test combine to check that P(A) = \sum_{S,T,L} P(A,S,T,L)
  ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_);
  EXPECT(assert_equal(pA, fAa));
  ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_);
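A short note on the combine check above: because P(A) = \sum_{S,T,L} P(A,S,T,L) and the order of summation does not matter, eliminating L, T, S (fAa) or S, T, L (fAb) from pASTL must give the same marginal; the assert_equal against pA verifies this for fAa, with fAb summing in the reverse order.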
@ -18,110 +18,135 @@

#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/base/debug.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/Vector.h>

#include <CppUnitLite/TestHarness.h>

#include <boost/assign/list_inserter.hpp>
#include <boost/assign/std/map.hpp>

using namespace boost::assign;

#include <iostream>
#include <string>
#include <vector>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */
TEST(DiscreteBayesNet, bayesNet) {
  DiscreteBayesNet bayesNet;
  DiscreteKey Parent(0, 2), Child(1, 2);

  auto prior = boost::make_shared<DiscreteConditional>(Parent % "6/4");
  CHECK(assert_equal(Potentials::ADT({Parent}, "0.6 0.4"),
                     (Potentials::ADT)*prior));
  bayesNet.push_back(prior);

  auto conditional =
      boost::make_shared<DiscreteConditional>(Child | Parent = "7/3 8/2");
  EXPECT_LONGS_EQUAL(1, *(conditional->beginFrontals()));
  Potentials::ADT expected(Child & Parent, "0.7 0.8 0.3 0.2");
  CHECK(assert_equal(expected, (Potentials::ADT)*conditional));
  bayesNet.push_back(conditional);

  DiscreteFactorGraph fg(bayesNet);
  LONGS_EQUAL(2, fg.back()->size());

  // Check the marginals
  const double expectedMarginal[2]{0.4, 0.6 * 0.3 + 0.4 * 0.2};
  DiscreteMarginals marginals(fg);
  for (size_t j = 0; j < 2; j++) {
    Vector FT = marginals.marginalProbabilities(DiscreteKey(j, 2));
    EXPECT_DOUBLES_EQUAL(expectedMarginal[j], FT[1], 1e-3);
    EXPECT_DOUBLES_EQUAL(FT[0], 1.0 - FT[1], 1e-9);
  }
}

/* ************************************************************************* */
TEST(DiscreteBayesNet, Asia) {
  DiscreteBayesNet asia;
  DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
      Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);

  asia.add(Asia % "99/1");
  asia.add(Smoking % "50/50");

  asia.add(Tuberculosis | Asia = "99/1 95/5");
  asia.add(LungCancer | Smoking = "99/1 90/10");
  asia.add(Bronchitis | Smoking = "70/30 40/60");

  asia.add((Either | Tuberculosis, LungCancer) = "F T T T");

  asia.add(XRay | Either = "95/5 2/98");
  asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");

  // Convert to factor graph
  DiscreteFactorGraph fg(asia);
  LONGS_EQUAL(3, fg.back()->size());

  // Check the marginals we know (of the parent-less nodes)
  DiscreteMarginals marginals(fg);
  Vector2 va(0.99, 0.01), vs(0.5, 0.5);
  EXPECT(assert_equal(va, marginals.marginalProbabilities(Asia)));
  EXPECT(assert_equal(vs, marginals.marginalProbabilities(Smoking)));

  // Create solver and eliminate
  Ordering ordering;
  ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
  DiscreteConditional expected2(Bronchitis % "11/9");
  EXPECT(assert_equal(expected2, *chordal->back()));

  // solve
  DiscreteFactor::sharedValues actualMPE = chordal->optimize();
  DiscreteFactor::Values expectedMPE;
  insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)(
      Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)(
      LungCancer.first, 0)(Bronchitis.first, 0);
  EXPECT(assert_equal(expectedMPE, *actualMPE));

  // add evidence, we were in Asia and we have dyspnea
  fg.add(Asia, "0 1");
  fg.add(Dyspnea, "0 1");

  // solve again, now with evidence
  DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
  DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
  DiscreteFactor::Values expectedMPE2;
  insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)(
      Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)(
      LungCancer.first, 0)(Bronchitis.first, 1);
  EXPECT(assert_equal(expectedMPE2, *actualMPE2));

  // now sample from it
  DiscreteFactor::Values expectedSample;
  SETDEBUG("DiscreteConditional::sample", false);
  insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)(
      Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)(
      LungCancer.first, 1)(Bronchitis.first, 0);
  DiscreteFactor::sharedValues actualSample = chordal2->sample();
  EXPECT(assert_equal(expectedSample, *actualSample));
}

/* ************************************************************************* */
TEST_UNSAFE(DiscreteBayesNet, Sugar) {
  DiscreteKey T(0, 2), L(1, 2), E(2, 2), C(8, 3), S(7, 2);

  DiscreteBayesNet bn;

  // try logic
  bn.add((E | T, L) = "OR");
  bn.add((E | T, L) = "AND");

  // try multivalued
  bn.add(C % "1/1/2");
  bn.add(C | S = "1/1/2 5/2/3");
}

/* ************************************************************************* */
@ -130,4 +155,3 @@ int main() {
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
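For reference, the hand-computed `expectedMarginal` values in the new bayesNet test above follow directly from the two conditionals: the prior "6/4" gives P(Parent=1) = 0.4, and marginalizing out the parent from "7/3 8/2" gives P(Child=1) = 0.6*0.3 + 0.4*0.2 = 0.26.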
@ -1,261 +1,228 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/*
 * @file testDiscreteBayesTree.cpp
 * @date sept 15, 2012
 * @author Frank Dellaert
 */

#include <gtsam/base/Vector.h>
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteBayesTree.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/inference/BayesNet.h>

#include <boost/assign/std/vector.hpp>
using namespace boost::assign;

#include <CppUnitLite/TestHarness.h>

#include <vector>

using namespace std;
using namespace gtsam;

static bool debug = false;

/* ************************************************************************* */
TEST_UNSAFE(DiscreteBayesTree, ThinTree) {
  const int nrNodes = 15;
  const size_t nrStates = 2;

  // define variables
  vector<DiscreteKey> key;
  for (int i = 0; i < nrNodes; i++) {
    DiscreteKey key_i(i, nrStates);
    key.push_back(key_i);
  }

  // create a thin-tree Bayesnet, a la Jean-Guillaume
  DiscreteBayesNet bayesNet;
  bayesNet.add(key[14] % "1/3");

  bayesNet.add(key[13] | key[14] = "1/3 3/1");
  bayesNet.add(key[12] | key[14] = "3/1 3/1");

  bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1");
  bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1");
  bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4");
  bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1");

  bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1");
  bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1");
  bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4");
  bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1");

  bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1");
  bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1");
  bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4");
  bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1");

  if (debug) {
    GTSAM_PRINT(bayesNet);
    bayesNet.saveGraph("/tmp/discreteBayesNet.dot");
  }

  // create a BayesTree out of a Bayes net
  auto bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal();
  if (debug) {
    GTSAM_PRINT(*bayesTree);
    bayesTree->saveGraph("/tmp/discreteBayesTree.dot");
  }

  // Check frontals and parents
  for (size_t i : {13, 14, 9, 3, 2, 8, 1, 0, 10, 5, 4}) {
    auto clique_i = (*bayesTree)[i];
    EXPECT_LONGS_EQUAL(i, *(clique_i->conditional_->beginFrontals()));
  }

  auto R = bayesTree->roots().front();

  // Check whether BN and BT give the same answer on all configurations
  vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
      key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] &
      key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]);
  for (size_t i = 0; i < allPosbValues.size(); ++i) {
    DiscreteFactor::Values x = allPosbValues[i];
    double expected = bayesNet.evaluate(x);
    double actual = bayesTree->evaluate(x);
    DOUBLES_EQUAL(expected, actual, 1e-9);
  }

  // Calculate all some marginals for Values==all1
  Vector marginals = Vector::Zero(15);
  double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0,
         joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0,
         joint_4_11 = 0, joint_11_13 = 0, joint_11_13_14 = 0,
         joint_11_12_13_14 = 0, joint_9_11_12_13 = 0, joint_8_11_12_13 = 0;
  for (size_t i = 0; i < allPosbValues.size(); ++i) {
    DiscreteFactor::Values x = allPosbValues[i];
    double px = bayesTree->evaluate(x);
    for (size_t i = 0; i < 15; i++)
      if (x[i]) marginals[i] += px;
    if (x[12] && x[14]) {
      joint_12_14 += px;
      if (x[9]) joint_9_12_14 += px;
      if (x[8]) joint_8_12_14 += px;
    }
    if (x[8] && x[12]) joint_8_12 += px;
    if (x[2]) {
      if (x[8]) joint82 += px;
      if (x[1]) joint12 += px;
    }
    if (x[4]) {
      if (x[2]) joint24 += px;
      if (x[5]) joint45 += px;
      if (x[6]) joint46 += px;
      if (x[11]) joint_4_11 += px;
    }
    if (x[11] && x[13]) {
      joint_11_13 += px;
      if (x[8] && x[12]) joint_8_11_12_13 += px;
      if (x[9] && x[12]) joint_9_11_12_13 += px;
      if (x[14]) {
        joint_11_13_14 += px;
        if (x[12]) {
          joint_11_12_13_14 += px;
        }
      }
    }
  }
  DiscreteFactor::Values all1 = allPosbValues.back();

  // check separator marginal P(S0)
  auto clique = (*bayesTree)[0];
  DiscreteFactorGraph separatorMarginal0 =
      clique->separatorMarginal(EliminateDiscrete);
  DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9);

  // check separator marginal P(S9), should be P(14)
  clique = (*bayesTree)[9];
  DiscreteFactorGraph separatorMarginal9 =
      clique->separatorMarginal(EliminateDiscrete);
  DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9);

  // check separator marginal of root, should be empty
  clique = (*bayesTree)[11];
  DiscreteFactorGraph separatorMarginal11 =
      clique->separatorMarginal(EliminateDiscrete);
  LONGS_EQUAL(0, separatorMarginal11.size());

  // check shortcut P(S9||R) to root
  clique = (*bayesTree)[9];
  DiscreteBayesNet shortcut = clique->shortcut(R, EliminateDiscrete);
  LONGS_EQUAL(1, shortcut.size());
  DOUBLES_EQUAL(joint_11_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);

  // check shortcut P(S8||R) to root
  clique = (*bayesTree)[8];
  shortcut = clique->shortcut(R, EliminateDiscrete);
  DOUBLES_EQUAL(joint_11_12_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9);

  // check shortcut P(S2||R) to root
  clique = (*bayesTree)[2];
  shortcut = clique->shortcut(R, EliminateDiscrete);
  DOUBLES_EQUAL(joint_9_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);

  // check shortcut P(S0||R) to root
  clique = (*bayesTree)[0];
  shortcut = clique->shortcut(R, EliminateDiscrete);
  DOUBLES_EQUAL(joint_8_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9);

  // calculate all shortcuts to root
  DiscreteBayesTree::Nodes cliques = bayesTree->nodes();
  for (auto clique : cliques) {
    DiscreteBayesNet shortcut = clique.second->shortcut(R, EliminateDiscrete);
    if (debug) {
      clique.second->conditional_->printSignature();
      shortcut.print("shortcut:");
    }
  }

  // Check all marginals
  DiscreteFactor::shared_ptr marginalFactor;
  for (size_t i = 0; i < 15; i++) {
    marginalFactor = bayesTree->marginalFactor(i, EliminateDiscrete);
    double actual = (*marginalFactor)(all1);
    DOUBLES_EQUAL(marginals[i], actual, 1e-9);
  }

  DiscreteBayesNet::shared_ptr actualJoint;

  // Check joint P(8, 2)
  actualJoint = bayesTree->jointBayesNet(8, 2, EliminateDiscrete);
  DOUBLES_EQUAL(joint82, actualJoint->evaluate(all1), 1e-9);

  // Check joint P(1, 2)
  actualJoint = bayesTree->jointBayesNet(1, 2, EliminateDiscrete);
  DOUBLES_EQUAL(joint12, actualJoint->evaluate(all1), 1e-9);

  // Check joint P(2, 4)
  actualJoint = bayesTree->jointBayesNet(2, 4, EliminateDiscrete);
  DOUBLES_EQUAL(joint24, actualJoint->evaluate(all1), 1e-9);

  // Check joint P(4, 5)
  actualJoint = bayesTree->jointBayesNet(4, 5, EliminateDiscrete);
  DOUBLES_EQUAL(joint45, actualJoint->evaluate(all1), 1e-9);

  // Check joint P(4, 6)
  actualJoint = bayesTree->jointBayesNet(4, 6, EliminateDiscrete);
  DOUBLES_EQUAL(joint46, actualJoint->evaluate(all1), 1e-9);

  // Check joint P(4, 11)
  actualJoint = bayesTree->jointBayesNet(4, 11, EliminateDiscrete);
  DOUBLES_EQUAL(joint_4_11, actualJoint->evaluate(all1), 1e-9);
}

/* ************************************************************************* */
int main() {
@ -263,4 +230,3 @@ int main() {
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
Binary file not shown.
@ -16,9 +16,9 @@
 * @date Feb 14, 2011
 */

#include <boost/assign/std/map.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/make_shared.hpp>
using namespace boost::assign;

#include <CppUnitLite/TestHarness.h>
@ -36,6 +36,11 @@ TEST( DiscreteConditional, constructors)
  DiscreteConditional::shared_ptr expected1 =  //
      boost::make_shared<DiscreteConditional>(X | Y = "1/1 2/3 1/4");
  EXPECT(expected1);
  EXPECT_LONGS_EQUAL(0, *(expected1->beginFrontals()));
  EXPECT_LONGS_EQUAL(2, *(expected1->beginParents()));
  EXPECT(expected1->endParents() == expected1->end());
  EXPECT(expected1->endFrontals() == expected1->beginParents());

  DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
  DiscreteConditional actual1(1, f1);
  EXPECT(assert_equal(*expected1, actual1, 1e-9));
@ -43,71 +48,68 @@ TEST( DiscreteConditional, constructors)
  DecisionTreeFactor f2(X & Y & Z,
                        "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
  DiscreteConditional actual2(1, f2);
  EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
}

/* ************************************************************************* */
TEST(DiscreteConditional, constructors_alt_interface) {
  DiscreteKey X(0, 2), Y(2, 3), Z(1, 2);  // watch ordering !

  Signature::Table table;
  Signature::Row r1, r2, r3;
  r1 += 1.0, 1.0;
  r2 += 2.0, 3.0;
  r3 += 1.0, 4.0;
  table += r1, r2, r3;
  auto actual1 = boost::make_shared<DiscreteConditional>(X | Y = table);
  EXPECT(actual1);
  DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8");
  DiscreteConditional expected1(1, f1);
  EXPECT(assert_equal(expected1, *actual1, 1e-9));

  DecisionTreeFactor f2(
      X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75");
  DiscreteConditional actual2(1, f2);
  EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9));
}

/* ************************************************************************* */
TEST(DiscreteConditional, constructors2) {
  // Declare keys and ordering
  DiscreteKey C(0, 2), B(1, 2);
  DecisionTreeFactor actual(C & B, "0.8 0.75 0.2 0.25");
  Signature signature((C | B) = "4/1 3/1");
  DiscreteConditional expected(signature);
  DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
  EXPECT(assert_equal(*expectedFactor, actual));
}

/* ************************************************************************* */
TEST(DiscreteConditional, constructors3) {
  // Declare keys and ordering
  DiscreteKey C(0, 2), B(1, 2), A(2, 2);
  DecisionTreeFactor actual(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8");
  Signature signature((C | B, A) = "4/1 1/1 1/1 1/4");
  DiscreteConditional expected(signature);
  DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor();
  EXPECT(assert_equal(*expectedFactor, actual));
}

/* ************************************************************************* */
TEST(DiscreteConditional, Combine) {
  DiscreteKey A(0, 2), B(1, 2);
  vector<DiscreteConditional::shared_ptr> c;
  c.push_back(boost::make_shared<DiscreteConditional>(A | B = "1/2 2/1"));
  c.push_back(boost::make_shared<DiscreteConditional>(B % "1/2"));
  DecisionTreeFactor factor(A & B, "0.111111 0.444444 0.222222 0.222222");
  DiscreteConditional actual(2, factor);
  auto expected = DiscreteConditional::Combine(c.begin(), c.end());
  EXPECT(assert_equal(*expected, actual, 1e-5));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@ -19,6 +19,7 @@
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteEliminationTree.h>
#include <gtsam/discrete/DiscreteBayesTree.h>
#include <gtsam/inference/BayesNet.h>

#include <CppUnitLite/TestHarness.h>
@ -146,8 +146,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
// Second truss example with non-trivial factors
|
// Second truss example with non-trivial factors
|
||||||
TEST_UNSAFE( DiscreteMarginals, truss2 ) {
|
TEST_UNSAFE(DiscreteMarginals, truss2) {
|
||||||
|
|
||||||
const int nrNodes = 5;
|
const int nrNodes = 5;
|
||||||
const size_t nrStates = 2;
|
const size_t nrStates = 2;
|
||||||
|
|
||||||
|
@ -160,40 +159,39 @@ TEST_UNSAFE( DiscreteMarginals, truss2 ) {
|
||||||
|
|
||||||
// create graph and add three truss potentials
|
// create graph and add three truss potentials
|
||||||
DiscreteFactorGraph graph;
|
DiscreteFactorGraph graph;
|
||||||
graph.add(key[0] & key[2] & key[4],"1 2 3 4 5 6 7 8");
|
graph.add(key[0] & key[2] & key[4], "1 2 3 4 5 6 7 8");
|
||||||
graph.add(key[1] & key[3] & key[4],"1 2 3 4 5 6 7 8");
|
graph.add(key[1] & key[3] & key[4], "1 2 3 4 5 6 7 8");
|
||||||
graph.add(key[2] & key[3] & key[4],"1 2 3 4 5 6 7 8");
|
graph.add(key[2] & key[3] & key[4], "1 2 3 4 5 6 7 8");
|
||||||
|
|
||||||
// Calculate the marginals by brute force
|
// Calculate the marginals by brute force
|
||||||
vector<DiscreteFactor::Values> allPosbValues = cartesianProduct(
|
vector<DiscreteFactor::Values> allPosbValues =
|
||||||
key[0] & key[1] & key[2] & key[3] & key[4]);
|
cartesianProduct(key[0] & key[1] & key[2] & key[3] & key[4]);
|
||||||
Vector T = Z_5x1, F = Z_5x1;
|
Vector T = Z_5x1, F = Z_5x1;
|
||||||
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
for (size_t i = 0; i < allPosbValues.size(); ++i) {
|
||||||
DiscreteFactor::Values x = allPosbValues[i];
|
DiscreteFactor::Values x = allPosbValues[i];
|
||||||
double px = graph(x);
|
double px = graph(x);
|
||||||
for (size_t j=0;j<5;j++)
|
for (size_t j = 0; j < 5; j++)
|
||||||
if (x[j]) T[j]+=px; else F[j]+=px;
|
if (x[j])
|
||||||
// cout << x[0] << " " << x[1] << " "<< x[2] << " " << x[3] << " " << x[4] << " :\t" << px << endl;
|
T[j] += px;
|
||||||
|
else
|
||||||
|
F[j] += px;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check all marginals given by a sequential solver and Marginals
|
// Check all marginals given by a sequential solver and Marginals
|
||||||
// DiscreteSequentialSolver solver(graph);
|
// DiscreteSequentialSolver solver(graph);
|
||||||
DiscreteMarginals marginals(graph);
|
DiscreteMarginals marginals(graph);
|
||||||
for (size_t j=0;j<5;j++) {
|
for (size_t j = 0; j < 5; j++) {
|
||||||
double sum = T[j]+F[j];
|
double sum = T[j] + F[j];
|
||||||
T[j]/=sum;
|
T[j] /= sum;
|
||||||
F[j]/=sum;
|
F[j] /= sum;
|
||||||
|
|
||||||
// // solver
|
|
||||||
// Vector actualV = solver.marginalProbabilities(key[j]);
|
|
||||||
// EXPECT(assert_equal((Vector(2) << F[j], T[j]), actualV));
|
|
||||||
|
|
||||||
// Marginals
|
// Marginals
|
||||||
vector<double> table;
|
vector<double> table;
|
||||||
table += F[j],T[j];
|
table += F[j], T[j];
|
||||||
DecisionTreeFactor expectedM(key[j],table);
|
DecisionTreeFactor expectedM(key[j], table);
|
||||||
DiscreteFactor::shared_ptr actualM = marginals(j);
|
DiscreteFactor::shared_ptr actualM = marginals(j);
|
||||||
EXPECT(assert_equal(expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
|
EXPECT(assert_equal(
|
||||||
|
expectedM, *boost::dynamic_pointer_cast<DecisionTreeFactor>(actualM)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
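As a minimal sketch of the brute-force check used in the test above, assuming only calls already visible in it (DiscreteFactorGraph::add, cartesianProduct, graph(x), DiscreteMarginals); the keys and the potential table here are hypothetical:

// Marginalize one binary variable by enumeration, then compare with DiscreteMarginals.
DiscreteKey A(0, 2), B(1, 2);
DiscreteFactorGraph smallGraph;
smallGraph.add(A & B, "1 2 3 4");  // hypothetical 2x2 potential

Vector2 pA = Vector2::Zero();
vector<DiscreteFactor::Values> assignments = cartesianProduct(A & B);
for (size_t i = 0; i < assignments.size(); ++i)
  pA[assignments[i].at(0)] += smallGraph(assignments[i]);  // accumulate unnormalized P(A = a)
pA /= pA.sum();  // normalize to a proper marginal

DiscreteMarginals smallMarginals(smallGraph);
DiscreteFactor::shared_ptr actualA = smallMarginals(0);  // marginal on key 0, should match pA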
|
|
|
@ -11,36 +11,43 @@
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file testSignature
|
* @file testSignature
|
||||||
* @brief Tests focusing on the details of Signatures to evaluate boost compliance
|
* @brief Tests focusing on the details of Signatures to evaluate boost
|
||||||
|
* compliance
|
||||||
* @author Alex Cunningham
|
* @author Alex Cunningham
|
||||||
* @date Sept 19th 2011
|
* @date Sept 19th 2011
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <boost/assign/std/vector.hpp>
|
|
||||||
#include <CppUnitLite/TestHarness.h>
|
#include <CppUnitLite/TestHarness.h>
|
||||||
|
|
||||||
#include <gtsam/base/Testable.h>
|
#include <gtsam/base/Testable.h>
|
||||||
#include <gtsam/discrete/Signature.h>
|
#include <gtsam/discrete/Signature.h>
|
||||||
|
|
||||||
|
#include <boost/assign/std/vector.hpp>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
using namespace gtsam;
|
using namespace gtsam;
|
||||||
using namespace boost::assign;
|
using namespace boost::assign;
|
||||||
|
|
||||||
DiscreteKey X(0,2), Y(1,3), Z(2,2);
|
DiscreteKey X(0, 2), Y(1, 3), Z(2, 2);
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
TEST(testSignature, simple_conditional) {
|
TEST(testSignature, simple_conditional) {
|
||||||
Signature sig(X | Y = "1/1 2/3 1/4");
|
Signature sig(X | Y = "1/1 2/3 1/4");
|
||||||
|
Signature::Table table = *sig.table();
|
||||||
|
vector<double> row[3]{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}};
|
||||||
|
CHECK(row[0] == table[0]);
|
||||||
|
CHECK(row[1] == table[1]);
|
||||||
|
CHECK(row[2] == table[2]);
|
||||||
DiscreteKey actKey = sig.key();
|
DiscreteKey actKey = sig.key();
|
||||||
LONGS_EQUAL((long)X.first, (long)actKey.first);
|
LONGS_EQUAL(X.first, actKey.first);
|
||||||
|
|
||||||
DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
|
DiscreteKeys actKeys = sig.discreteKeys();
|
||||||
LONGS_EQUAL(2, (long)actKeys.size());
|
LONGS_EQUAL(2, actKeys.size());
|
||||||
LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
|
LONGS_EQUAL(X.first, actKeys.front().first);
|
||||||
LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
|
LONGS_EQUAL(Y.first, actKeys.back().first);
|
||||||
|
|
||||||
vector<double> actCpt = sig.cpt();
|
vector<double> actCpt = sig.cpt();
|
||||||
EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
|
EXPECT_LONGS_EQUAL(6, actCpt.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -54,17 +61,20 @@ TEST(testSignature, simple_conditional_nonparser) {
|
||||||
|
|
||||||
Signature sig(X | Y = table);
|
Signature sig(X | Y = table);
|
||||||
DiscreteKey actKey = sig.key();
|
DiscreteKey actKey = sig.key();
|
||||||
EXPECT_LONGS_EQUAL((long)X.first, (long)actKey.first);
|
EXPECT_LONGS_EQUAL(X.first, actKey.first);
|
||||||
|
|
||||||
DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
|
DiscreteKeys actKeys = sig.discreteKeys();
|
||||||
LONGS_EQUAL(2, (long)actKeys.size());
|
LONGS_EQUAL(2, actKeys.size());
|
||||||
LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
|
LONGS_EQUAL(X.first, actKeys.front().first);
|
||||||
LONGS_EQUAL((long)X.first, (long)actKeys.back().first);
|
LONGS_EQUAL(Y.first, actKeys.back().first);
|
||||||
|
|
||||||
vector<double> actCpt = sig.cpt();
|
vector<double> actCpt = sig.cpt();
|
||||||
EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
|
EXPECT_LONGS_EQUAL(6, actCpt.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
|
int main() {
|
||||||
|
TestResult tr;
|
||||||
|
return TestRegistry::runAllTests(tr);
|
||||||
|
}
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
|
@ -162,7 +162,7 @@ private:
|
||||||
NeedsToAlign = (sizeof(B) % 16) == 0 || (sizeof(R) % 16) == 0
|
NeedsToAlign = (sizeof(B) % 16) == 0 || (sizeof(R) % 16) == 0
|
||||||
};
|
};
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Declare this to be both Testable and a Manifold
|
// Declare this to be both Testable and a Manifold
|
||||||
|
|
|
@ -24,7 +24,7 @@
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Cal3Fisheye::Cal3Fisheye(const Vector& v)
|
Cal3Fisheye::Cal3Fisheye(const Vector9& v)
|
||||||
: fx_(v[0]),
|
: fx_(v[0]),
|
||||||
fy_(v[1]),
|
fy_(v[1]),
|
||||||
s_(v[2]),
|
s_(v[2]),
|
||||||
|
@ -50,76 +50,73 @@ Matrix3 Cal3Fisheye::K() const {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
static Matrix29 D2dcalibration(const double xd, const double yd,
|
double Cal3Fisheye::Scaling(double r) {
|
||||||
const double xi, const double yi,
|
static constexpr double threshold = 1e-8;
|
||||||
const double t3, const double t5,
|
if (r > threshold || r < -threshold) {
|
||||||
const double t7, const double t9, const double r,
|
return atan(r) / r;
|
||||||
Matrix2& DK) {
|
} else {
|
||||||
// order: fx, fy, s, u0, v0
|
// Taylor expansion close to 0
|
||||||
Matrix25 DR1;
|
double r2 = r * r, r4 = r2 * r2;
|
||||||
DR1 << xd, 0.0, yd, 1.0, 0.0, 0.0, yd, 0.0, 0.0, 1.0;
|
return 1.0 - r2 / 3 + r4 / 5;
|
||||||
|
}
|
||||||
// order: k1, k2, k3, k4
|
|
||||||
Matrix24 DR2;
|
|
||||||
DR2 << t3 * xi, t5 * xi, t7 * xi, t9 * xi, t3 * yi, t5 * yi, t7 * yi, t9 * yi;
|
|
||||||
DR2 /= r;
|
|
||||||
Matrix29 D;
|
|
||||||
D << DR1, DK * DR2;
|
|
||||||
return D;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
|
||||||
static Matrix2 D2dintrinsic(const double xi, const double yi, const double r,
|
|
||||||
const double td, const double t, const double tt,
|
|
||||||
const double t4, const double t6, const double t8,
|
|
||||||
const double k1, const double k2, const double k3,
|
|
||||||
const double k4, const Matrix2& DK) {
|
|
||||||
const double dr_dxi = xi / sqrt(xi * xi + yi * yi);
|
|
||||||
const double dr_dyi = yi / sqrt(xi * xi + yi * yi);
|
|
||||||
const double dt_dr = 1 / (1 + r * r);
|
|
||||||
const double dtd_dt =
|
|
||||||
1 + 3 * k1 * tt + 5 * k2 * t4 + 7 * k3 * t6 + 9 * k4 * t8;
|
|
||||||
const double dtd_dxi = dtd_dt * dt_dr * dr_dxi;
|
|
||||||
const double dtd_dyi = dtd_dt * dt_dr * dr_dyi;
|
|
||||||
|
|
||||||
const double rinv = 1 / r;
|
|
||||||
const double rrinv = 1 / (r * r);
|
|
||||||
const double dxd_dxi =
|
|
||||||
dtd_dxi * xi * rinv + td * rinv + td * xi * (-rrinv) * dr_dxi;
|
|
||||||
const double dxd_dyi = dtd_dyi * xi * rinv - td * xi * rrinv * dr_dyi;
|
|
||||||
const double dyd_dxi = dtd_dxi * yi * rinv - td * yi * rrinv * dr_dxi;
|
|
||||||
const double dyd_dyi =
|
|
||||||
dtd_dyi * yi * rinv + td * rinv + td * yi * (-rrinv) * dr_dyi;
|
|
||||||
|
|
||||||
Matrix2 DR;
|
|
||||||
DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi;
|
|
||||||
|
|
||||||
return DK * DR;
|
|
||||||
}
|
}
|
||||||
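A note on the small-r branch of Scaling above (my derivation, not from the source): it truncates the series

\[ \frac{\arctan r}{r} \;=\; 1 \;-\; \frac{r^2}{3} \;+\; \frac{r^4}{5} \;-\; \frac{r^6}{7} \;+\; \cdots \]

after the \(r^4\) term, so for \(|r| \le 10^{-8}\) the truncation error is on the order of \(r^6/7\), far below double precision, while avoiding the 0/0 in \(\arctan(r)/r\).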
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1,
|
Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1,
|
||||||
OptionalJacobian<2, 2> H2) const {
|
OptionalJacobian<2, 2> H2) const {
|
||||||
const double xi = p.x(), yi = p.y();
|
const double xi = p.x(), yi = p.y();
|
||||||
const double r = sqrt(xi * xi + yi * yi);
|
const double r2 = xi * xi + yi * yi, r = sqrt(r2);
|
||||||
const double t = atan(r);
|
const double t = atan(r);
|
||||||
const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4;
|
const double t2 = t * t, t4 = t2 * t2, t6 = t2 * t4, t8 = t4 * t4;
|
||||||
const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8);
|
Vector5 K, T;
|
||||||
const double td_o_r = r > 1e-8 ? td / r : 1;
|
K << 1, k1_, k2_, k3_, k4_;
|
||||||
const double xd = td_o_r * xi, yd = td_o_r * yi;
|
T << 1, t2, t4, t6, t8;
|
||||||
|
const double scaling = Scaling(r);
|
||||||
|
const double s = scaling * K.dot(T);
|
||||||
|
const double xd = s * xi, yd = s * yi;
|
||||||
Point2 uv(fx_ * xd + s_ * yd + u0_, fy_ * yd + v0_);
|
Point2 uv(fx_ * xd + s_ * yd + u0_, fy_ * yd + v0_);
|
||||||
|
|
||||||
Matrix2 DK;
|
Matrix2 DK;
|
||||||
if (H1 || H2) DK << fx_, s_, 0.0, fy_;
|
if (H1 || H2) DK << fx_, s_, 0.0, fy_;
|
||||||
|
|
||||||
// Derivative for calibration parameters (2 by 9)
|
// Derivative for calibration parameters (2 by 9)
|
||||||
if (H1)
|
if (H1) {
|
||||||
*H1 = D2dcalibration(xd, yd, xi, yi, t * tt, t * t4, t * t6, t * t8, r, DK);
|
Matrix25 DR1;
|
||||||
|
// order: fx, fy, s, u0, v0
|
||||||
|
DR1 << xd, 0.0, yd, 1.0, 0.0, 0.0, yd, 0.0, 0.0, 1.0;
|
||||||
|
|
||||||
|
// order: k1, k2, k3, k4
|
||||||
|
Matrix24 DR2;
|
||||||
|
auto T4 = T.tail<4>().transpose();
|
||||||
|
DR2 << xi * T4, yi * T4;
|
||||||
|
*H1 << DR1, DK * scaling * DR2;
|
||||||
|
}
|
||||||
|
|
||||||
// Derivative for points in intrinsic coords (2 by 2)
|
// Derivative for points in intrinsic coords (2 by 2)
|
||||||
if (H2)
|
if (H2) {
|
||||||
*H2 =
|
const double dtd_dt =
|
||||||
D2dintrinsic(xi, yi, r, td, t, tt, t4, t6, t8, k1_, k2_, k3_, k4_, DK);
|
1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8;
|
||||||
|
const double dt_dr = 1 / (1 + r2);
|
||||||
|
const double rinv = 1 / r;
|
||||||
|
const double dr_dxi = xi * rinv;
|
||||||
|
const double dr_dyi = yi * rinv;
|
||||||
|
const double dtd_dxi = dtd_dt * dt_dr * dr_dxi;
|
||||||
|
const double dtd_dyi = dtd_dt * dt_dr * dr_dyi;
|
||||||
|
|
||||||
|
const double td = t * K.dot(T);
|
||||||
|
const double rrinv = 1 / r2;
|
||||||
|
const double dxd_dxi =
|
||||||
|
dtd_dxi * dr_dxi + td * rinv - td * xi * rrinv * dr_dxi;
|
||||||
|
const double dxd_dyi = dtd_dyi * dr_dxi - td * xi * rrinv * dr_dyi;
|
||||||
|
const double dyd_dxi = dtd_dxi * dr_dyi - td * yi * rrinv * dr_dxi;
|
||||||
|
const double dyd_dyi =
|
||||||
|
dtd_dyi * dr_dyi + td * rinv - td * yi * rrinv * dr_dyi;
|
||||||
|
|
||||||
|
Matrix2 DR;
|
||||||
|
DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi;
|
||||||
|
|
||||||
|
*H2 = DK * DR;
|
||||||
|
}
|
||||||
|
|
||||||
return uv;
|
return uv;
|
||||||
}
|
}
|
||||||
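Collecting the quantities computed in uncalibrate above into one place (reconstructed from the code shown; notation follows its variable names):

\[ r = \sqrt{x_i^2 + y_i^2}, \qquad \theta = \arctan r, \qquad \theta_d = \theta\,(1 + k_1\theta^2 + k_2\theta^4 + k_3\theta^6 + k_4\theta^8) \]
\[ x_d = \frac{\theta_d}{r}\,x_i, \qquad y_d = \frac{\theta_d}{r}\,y_i, \qquad u = f_x\,x_d + s\,y_d + u_0, \qquad v = f_y\,y_d + v_0 \]

where \(\theta_d/r\) is evaluated in the code as Scaling(r) times the dot product of K and T.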
|
@ -157,39 +154,10 @@ Point2 Cal3Fisheye::calibrate(const Point2& uv, const double tol) const {
|
||||||
return pi;
|
return pi;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
|
||||||
Matrix2 Cal3Fisheye::D2d_intrinsic(const Point2& p) const {
|
|
||||||
const double xi = p.x(), yi = p.y();
|
|
||||||
const double r = sqrt(xi * xi + yi * yi);
|
|
||||||
const double t = atan(r);
|
|
||||||
const double tt = t * t, t4 = tt * tt, t6 = t4 * tt, t8 = t4 * t4;
|
|
||||||
const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8);
|
|
||||||
|
|
||||||
Matrix2 DK;
|
|
||||||
DK << fx_, s_, 0.0, fy_;
|
|
||||||
|
|
||||||
return D2dintrinsic(xi, yi, r, td, t, tt, t4, t6, t8, k1_, k2_, k3_, k4_, DK);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
|
||||||
Matrix29 Cal3Fisheye::D2d_calibration(const Point2& p) const {
|
|
||||||
const double xi = p.x(), yi = p.y();
|
|
||||||
const double r = sqrt(xi * xi + yi * yi);
|
|
||||||
const double t = atan(r);
|
|
||||||
const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4;
|
|
||||||
const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8);
|
|
||||||
const double xd = td / r * xi, yd = td / r * yi;
|
|
||||||
|
|
||||||
Matrix2 DK;
|
|
||||||
DK << fx_, s_, 0.0, fy_;
|
|
||||||
return D2dcalibration(xd, yd, xi, yi, t * tt, t * t4, t * t6, t * t8, r, DK);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
void Cal3Fisheye::print(const std::string& s_) const {
|
void Cal3Fisheye::print(const std::string& s_) const {
|
||||||
gtsam::print((Matrix)K(), s_ + ".K");
|
gtsam::print((Matrix)K(), s_ + ".K");
|
||||||
gtsam::print(Vector(k()), s_ + ".k");
|
gtsam::print(Vector(k()), s_ + ".k");
|
||||||
;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
|
@ -20,6 +20,8 @@
|
||||||
|
|
||||||
#include <gtsam/geometry/Point2.h>
|
#include <gtsam/geometry/Point2.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -43,7 +45,7 @@ namespace gtsam {
|
||||||
* [u; v; 1] = K*[x_d; y_d; 1]
|
* [u; v; 1] = K*[x_d; y_d; 1]
|
||||||
*/
|
*/
|
||||||
class GTSAM_EXPORT Cal3Fisheye {
|
class GTSAM_EXPORT Cal3Fisheye {
|
||||||
protected:
|
private:
|
||||||
double fx_, fy_, s_, u0_, v0_; // focal length, skew and principal point
|
double fx_, fy_, s_, u0_, v0_; // focal length, skew and principal point
|
||||||
double k1_, k2_, k3_, k4_; // fisheye distortion coefficients
|
double k1_, k2_, k3_, k4_; // fisheye distortion coefficients
|
||||||
|
|
||||||
|
@ -78,7 +80,7 @@ class GTSAM_EXPORT Cal3Fisheye {
|
||||||
/// @name Advanced Constructors
|
/// @name Advanced Constructors
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
Cal3Fisheye(const Vector& v);
|
explicit Cal3Fisheye(const Vector9& v);
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
/// @name Standard Interface
|
/// @name Standard Interface
|
||||||
|
@ -120,6 +122,9 @@ class GTSAM_EXPORT Cal3Fisheye {
|
||||||
/// Return all parameters as a vector
|
/// Return all parameters as a vector
|
||||||
Vector9 vector() const;
|
Vector9 vector() const;
|
||||||
|
|
||||||
|
/// Helper function that calculates atan(r)/r
|
||||||
|
static double Scaling(double r);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief convert intrinsic coordinates [x_i; y_i] to (distorted) image
|
* @brief convert intrinsic coordinates [x_i; y_i] to (distorted) image
|
||||||
* coordinates [u; v]
|
* coordinates [u; v]
|
||||||
|
@ -136,13 +141,6 @@ class GTSAM_EXPORT Cal3Fisheye {
|
||||||
/// y_i]
|
/// y_i]
|
||||||
Point2 calibrate(const Point2& p, const double tol = 1e-5) const;
|
Point2 calibrate(const Point2& p, const double tol = 1e-5) const;
|
||||||
|
|
||||||
/// Derivative of uncalibrate wrpt intrinsic coordinates [x_i; y_i]
|
|
||||||
Matrix2 D2d_intrinsic(const Point2& p) const;
|
|
||||||
|
|
||||||
/// Derivative of uncalibrate wrpt the calibration parameters
|
|
||||||
/// [fx, fy, s, u0, v0, k1, k2, k3, k4]
|
|
||||||
Matrix29 D2d_calibration(const Point2& p) const;
|
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
/// @name Testable
|
/// @name Testable
|
||||||
/// @{
|
/// @{
|
||||||
|
|
|
@ -319,7 +319,7 @@ private:
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
||||||
template<class CAMERA>
|
template<class CAMERA>
|
||||||
|
|
|
@ -212,7 +212,7 @@ class EssentialMatrix {
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
|
|
|
@ -325,7 +325,7 @@ private:
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
||||||
// manifold traits
|
// manifold traits
|
||||||
|
|
|
@ -107,9 +107,9 @@ public:
|
||||||
|
|
||||||
// If needed, apply chain rule
|
// If needed, apply chain rule
|
||||||
if (Dpose)
|
if (Dpose)
|
||||||
*Dpose = Dpi_pn * *Dpose;
|
*Dpose = Dpi_pn * *Dpose;
|
||||||
if (Dpoint)
|
if (Dpoint)
|
||||||
*Dpoint = Dpi_pn * *Dpoint;
|
*Dpoint = Dpi_pn * *Dpoint;
|
||||||
|
|
||||||
return pi;
|
return pi;
|
||||||
}
|
}
|
||||||
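The chain-rule step above only left-multiplies the already-accumulated Jacobians by the Jacobian of the final projection step, restated here in the code's own names:

\[ \text{Dpose} \leftarrow \text{Dpi\_pn}\;\text{Dpose}, \qquad \text{Dpoint} \leftarrow \text{Dpi\_pn}\;\text{Dpoint} \]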
|
@ -222,7 +222,7 @@ private:
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
// end of class PinholeBaseK
|
// end of class PinholeBaseK
|
||||||
|
|
||||||
|
@ -425,7 +425,7 @@ private:
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
// end of class PinholePose
|
// end of class PinholePose
|
||||||
|
|
||||||
|
|
|
@ -317,7 +317,7 @@ public:
|
||||||
|
|
||||||
public:
|
public:
|
||||||
// Align for Point2, which is either derived from, or is a typedef of, Vector2
|
// Align for Point2, which is either derived from, or is a typedef of, Vector2
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
}; // Pose2
|
}; // Pose2
|
||||||
|
|
||||||
/** specialization for pose2 wedge function (generic template in Lie.h) */
|
/** specialization for pose2 wedge function (generic template in Lie.h) */
|
||||||
|
|
|
@ -19,8 +19,10 @@
|
||||||
#include <gtsam/geometry/concepts.h>
|
#include <gtsam/geometry/concepts.h>
|
||||||
#include <gtsam/base/concepts.h>
|
#include <gtsam/base/concepts.h>
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
|
#include <iostream>
|
||||||
|
#include <limits>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
|
@ -36,10 +38,10 @@ Pose3::Pose3(const Pose2& pose2) :
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> H1,
|
Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> HR,
|
||||||
OptionalJacobian<6, 3> H2) {
|
OptionalJacobian<6, 3> Ht) {
|
||||||
if (H1) *H1 << I_3x3, Z_3x3;
|
if (HR) *HR << I_3x3, Z_3x3;
|
||||||
if (H2) *H2 << Z_3x3, R.transpose();
|
if (Ht) *Ht << Z_3x3, R.transpose();
|
||||||
return Pose3(R, t);
|
return Pose3(R, t);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,15 +74,15 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
||||||
OptionalJacobian<6,6> H) {
|
OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
H->setZero();
|
Hxi->setZero();
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
Vector6 dxi;
|
Vector6 dxi;
|
||||||
dxi.setZero();
|
dxi.setZero();
|
||||||
dxi(i) = 1.0;
|
dxi(i) = 1.0;
|
||||||
Matrix6 Gi = adjointMap(dxi);
|
Matrix6 Gi = adjointMap(dxi);
|
||||||
H->col(i) = Gi * y;
|
Hxi->col(i) = Gi * y;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return adjointMap(xi) * y;
|
return adjointMap(xi) * y;
|
||||||
|
@ -88,15 +90,15 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
||||||
OptionalJacobian<6,6> H) {
|
OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
H->setZero();
|
Hxi->setZero();
|
||||||
for (int i = 0; i < 6; ++i) {
|
for (int i = 0; i < 6; ++i) {
|
||||||
Vector6 dxi;
|
Vector6 dxi;
|
||||||
dxi.setZero();
|
dxi.setZero();
|
||||||
dxi(i) = 1.0;
|
dxi(i) = 1.0;
|
||||||
Matrix6 GTi = adjointMap(dxi).transpose();
|
Matrix6 GTi = adjointMap(dxi).transpose();
|
||||||
H->col(i) = GTi * y;
|
Hxi->col(i) = GTi * y;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return adjointMap(xi).transpose() * y;
|
return adjointMap(xi).transpose() * y;
|
||||||
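Both loops above rely on \(\mathrm{ad}(\xi)\,y\) being linear in \(\xi\), so the Jacobian with respect to \(\xi\) can be assembled column by column (a restatement, not from the source):

\[ \frac{\partial}{\partial \xi_i}\bigl(\mathrm{ad}(\xi)\,y\bigr) = \mathrm{ad}(e_i)\,y, \qquad \frac{\partial}{\partial \xi_i}\bigl(\mathrm{ad}(\xi)^{\top} y\bigr) = \mathrm{ad}(e_i)^{\top} y, \qquad i = 1,\dots,6. \]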
|
@ -114,8 +116,8 @@ bool Pose3::equals(const Pose3& pose, double tol) const {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
/** Modified from Murray94book version (which assumes w and v normalized?) */
|
/** Modified from Murray94book version (which assumes w and v normalized?) */
|
||||||
Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) {
|
||||||
if (H) *H = ExpmapDerivative(xi);
|
if (Hxi) *Hxi = ExpmapDerivative(xi);
|
||||||
|
|
||||||
// get angular velocity omega and translational velocity v from twist xi
|
// get angular velocity omega and translational velocity v from twist xi
|
||||||
Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));
|
Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));
|
||||||
|
@ -123,8 +125,8 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
||||||
Rot3 R = Rot3::Expmap(omega);
|
Rot3 R = Rot3::Expmap(omega);
|
||||||
double theta2 = omega.dot(omega);
|
double theta2 = omega.dot(omega);
|
||||||
if (theta2 > std::numeric_limits<double>::epsilon()) {
|
if (theta2 > std::numeric_limits<double>::epsilon()) {
|
||||||
Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis
|
Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis
|
||||||
Vector3 omega_cross_v = omega.cross(v); // points towards axis
|
Vector3 omega_cross_v = omega.cross(v); // points towards axis
|
||||||
Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2;
|
Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2;
|
||||||
return Pose3(R, t);
|
return Pose3(R, t);
|
||||||
} else {
|
} else {
|
||||||
|
@ -133,10 +135,10 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) {
|
||||||
}
|
}
|
||||||
|
|
||||||
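For reference, the general branch of Expmap above computes, for \(\xi = (\omega, v)\) with \(\theta^2 = \omega^{\top}\omega\) above the threshold (reconstructed from the code shown):

\[ R = \exp(\hat{\omega}), \qquad t = \frac{\omega \times v \;-\; R\,(\omega \times v) \;+\; \omega\,(\omega^{\top} v)}{\theta^2}, \qquad \mathrm{Expmap}(\xi) = (R,\, t). \]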
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
|
Vector6 Pose3::Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose) {
|
||||||
if (H) *H = LogmapDerivative(p);
|
if (Hpose) *Hpose = LogmapDerivative(pose);
|
||||||
const Vector3 w = Rot3::Logmap(p.rotation());
|
const Vector3 w = Rot3::Logmap(pose.rotation());
|
||||||
const Vector3 T = p.translation();
|
const Vector3 T = pose.translation();
|
||||||
const double t = w.norm();
|
const double t = w.norm();
|
||||||
if (t < 1e-10) {
|
if (t < 1e-10) {
|
||||||
Vector6 log;
|
Vector6 log;
|
||||||
|
@ -156,33 +158,33 @@ Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian H) {
|
Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian Hxi) {
|
||||||
#ifdef GTSAM_POSE3_EXPMAP
|
#ifdef GTSAM_POSE3_EXPMAP
|
||||||
return Expmap(xi, H);
|
return Expmap(xi, Hxi);
|
||||||
#else
|
#else
|
||||||
Matrix3 DR;
|
Matrix3 DR;
|
||||||
Rot3 R = Rot3::Retract(xi.head<3>(), H ? &DR : 0);
|
Rot3 R = Rot3::Retract(xi.head<3>(), Hxi ? &DR : 0);
|
||||||
if (H) {
|
if (Hxi) {
|
||||||
*H = I_6x6;
|
*Hxi = I_6x6;
|
||||||
H->topLeftCorner<3,3>() = DR;
|
Hxi->topLeftCorner<3, 3>() = DR;
|
||||||
}
|
}
|
||||||
return Pose3(R, Point3(xi.tail<3>()));
|
return Pose3(R, Point3(xi.tail<3>()));
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) {
|
Vector6 Pose3::ChartAtOrigin::Local(const Pose3& pose, ChartJacobian Hpose) {
|
||||||
#ifdef GTSAM_POSE3_EXPMAP
|
#ifdef GTSAM_POSE3_EXPMAP
|
||||||
return Logmap(T, H);
|
return Logmap(pose, Hpose);
|
||||||
#else
|
#else
|
||||||
Matrix3 DR;
|
Matrix3 DR;
|
||||||
Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0);
|
Vector3 omega = Rot3::LocalCoordinates(pose.rotation(), Hpose ? &DR : 0);
|
||||||
if (H) {
|
if (Hpose) {
|
||||||
*H = I_6x6;
|
*Hpose = I_6x6;
|
||||||
H->topLeftCorner<3,3>() = DR;
|
Hpose->topLeftCorner<3, 3>() = DR;
|
||||||
}
|
}
|
||||||
Vector6 xi;
|
Vector6 xi;
|
||||||
xi << omega, T.translation();
|
xi << omega, pose.translation();
|
||||||
return xi;
|
return xi;
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
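A round-trip usage sketch for the chart above (hypothetical values; which branch runs depends on GTSAM_POSE3_EXPMAP, but both recover the tangent vector):

// Retract a tangent vector at the origin, then map back with Local.
Vector6 xi;
xi << 0.1, 0.2, 0.3, 1.0, 2.0, 3.0;                 // (omega, v)
Pose3 T = Pose3::ChartAtOrigin::Retract(xi);
Vector6 xi_back = Pose3::ChartAtOrigin::Local(T);   // equals xi up to numerics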
|
@ -259,16 +261,16 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
const Point3& Pose3::translation(OptionalJacobian<3, 6> H) const {
|
const Point3& Pose3::translation(OptionalJacobian<3, 6> Hself) const {
|
||||||
if (H) *H << Z_3x3, rotation().matrix();
|
if (Hself) *Hself << Z_3x3, rotation().matrix();
|
||||||
return t_;
|
return t_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
||||||
const Rot3& Pose3::rotation(OptionalJacobian<3, 6> H) const {
|
const Rot3& Pose3::rotation(OptionalJacobian<3, 6> Hself) const {
|
||||||
if (H) {
|
if (Hself) {
|
||||||
*H << I_3x3, Z_3x3;
|
*Hself << I_3x3, Z_3x3;
|
||||||
}
|
}
|
||||||
return R_;
|
return R_;
|
||||||
}
|
}
|
||||||
|
@ -282,9 +284,10 @@ Matrix4 Pose3::matrix() const {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::transformPoseFrom(const Pose3& aTb) const {
|
Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself,
|
||||||
|
OptionalJacobian<6, 6> HaTb) const {
|
||||||
const Pose3& wTa = *this;
|
const Pose3& wTa = *this;
|
||||||
return wTa * aTb;
|
return wTa.compose(aTb, Hself, HaTb);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
@ -297,101 +300,101 @@ Pose3 Pose3::transform_to(const Pose3& pose) const {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> H1,
|
Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself,
|
||||||
OptionalJacobian<6, 6> H2) const {
|
OptionalJacobian<6, 6> HwTb) const {
|
||||||
if (H1) *H1 = -wTb.inverse().AdjointMap() * AdjointMap();
|
if (Hself) *Hself = -wTb.inverse().AdjointMap() * AdjointMap();
|
||||||
if (H2) *H2 = I_6x6;
|
if (HwTb) *HwTb = I_6x6;
|
||||||
const Pose3& wTa = *this;
|
const Pose3& wTa = *this;
|
||||||
return wTa.inverse() * wTb;
|
return wTa.inverse() * wTb;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Point3 Pose3::transformFrom(const Point3& p, OptionalJacobian<3,6> Dpose,
|
Point3 Pose3::transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself,
|
||||||
OptionalJacobian<3,3> Dpoint) const {
|
OptionalJacobian<3, 3> Hpoint) const {
|
||||||
// Only get matrix once, to avoid multiple allocations,
|
// Only get matrix once, to avoid multiple allocations,
|
||||||
// as well as multiple conversions in the Quaternion case
|
// as well as multiple conversions in the Quaternion case
|
||||||
const Matrix3 R = R_.matrix();
|
const Matrix3 R = R_.matrix();
|
||||||
if (Dpose) {
|
if (Hself) {
|
||||||
Dpose->leftCols<3>() = R * skewSymmetric(-p.x(), -p.y(), -p.z());
|
Hself->leftCols<3>() = R * skewSymmetric(-point.x(), -point.y(), -point.z());
|
||||||
Dpose->rightCols<3>() = R;
|
Hself->rightCols<3>() = R;
|
||||||
}
|
}
|
||||||
if (Dpoint) {
|
if (Hpoint) {
|
||||||
*Dpoint = R;
|
*Hpoint = R;
|
||||||
}
|
}
|
||||||
return R_ * p + t_;
|
return R_ * point + t_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Point3 Pose3::transformTo(const Point3& p, OptionalJacobian<3,6> Dpose,
|
Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself,
|
||||||
OptionalJacobian<3,3> Dpoint) const {
|
OptionalJacobian<3, 3> Hpoint) const {
|
||||||
// Only get transpose once, to avoid multiple allocations,
|
// Only get transpose once, to avoid multiple allocations,
|
||||||
// as well as multiple conversions in the Quaternion case
|
// as well as multiple conversions in the Quaternion case
|
||||||
const Matrix3 Rt = R_.transpose();
|
const Matrix3 Rt = R_.transpose();
|
||||||
const Point3 q(Rt*(p - t_));
|
const Point3 q(Rt*(point - t_));
|
||||||
if (Dpose) {
|
if (Hself) {
|
||||||
const double wx = q.x(), wy = q.y(), wz = q.z();
|
const double wx = q.x(), wy = q.y(), wz = q.z();
|
||||||
(*Dpose) <<
|
(*Hself) <<
|
||||||
0.0, -wz, +wy,-1.0, 0.0, 0.0,
|
0.0, -wz, +wy,-1.0, 0.0, 0.0,
|
||||||
+wz, 0.0, -wx, 0.0,-1.0, 0.0,
|
+wz, 0.0, -wx, 0.0,-1.0, 0.0,
|
||||||
-wy, +wx, 0.0, 0.0, 0.0,-1.0;
|
-wy, +wx, 0.0, 0.0, 0.0,-1.0;
|
||||||
}
|
}
|
||||||
if (Dpoint) {
|
if (Hpoint) {
|
||||||
*Dpoint = Rt;
|
*Hpoint = Rt;
|
||||||
}
|
}
|
||||||
return q;
|
return q;
|
||||||
}
|
}
|
||||||
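A small usage sketch for the two point transforms above (hypothetical values):

// transformTo inverts transformFrom: body -> world -> body round-trips.
Pose3 wTbody(Rot3::Rz(0.3), Point3(1.0, 2.0, 3.0));
Point3 p_body(0.5, -0.2, 1.0);
Point3 p_world = wTbody.transformFrom(p_body);   // R * p + t
Point3 p_again = wTbody.transformTo(p_world);    // R^T * (p - t), recovers p_body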
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
|
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> Hself,
|
||||||
OptionalJacobian<1, 3> H2) const {
|
OptionalJacobian<1, 3> Hpoint) const {
|
||||||
Matrix36 D_local_pose;
|
Matrix36 D_local_pose;
|
||||||
Matrix3 D_local_point;
|
Matrix3 D_local_point;
|
||||||
Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
|
Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
|
||||||
if (!H1 && !H2) {
|
if (!Hself && !Hpoint) {
|
||||||
return local.norm();
|
return local.norm();
|
||||||
} else {
|
} else {
|
||||||
Matrix13 D_r_local;
|
Matrix13 D_r_local;
|
||||||
const double r = norm3(local, D_r_local);
|
const double r = norm3(local, D_r_local);
|
||||||
if (H1) *H1 = D_r_local * D_local_pose;
|
if (Hself) *Hself = D_r_local * D_local_pose;
|
||||||
if (H2) *H2 = D_r_local * D_local_point;
|
if (Hpoint) *Hpoint = D_r_local * D_local_point;
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1,
|
double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> Hself,
|
||||||
OptionalJacobian<1, 6> H2) const {
|
OptionalJacobian<1, 6> Hpose) const {
|
||||||
Matrix13 D_local_point;
|
Matrix13 D_local_point;
|
||||||
double r = range(pose.translation(), H1, H2 ? &D_local_point : 0);
|
double r = range(pose.translation(), Hself, Hpose ? &D_local_point : 0);
|
||||||
if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
|
if (Hpose) *Hpose << Matrix13::Zero(), D_local_point * pose.rotation().matrix();
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> H1,
|
Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> Hself,
|
||||||
OptionalJacobian<2, 3> H2) const {
|
OptionalJacobian<2, 3> Hpoint) const {
|
||||||
Matrix36 D_local_pose;
|
Matrix36 D_local_pose;
|
||||||
Matrix3 D_local_point;
|
Matrix3 D_local_point;
|
||||||
Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
|
Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0);
|
||||||
if (!H1 && !H2) {
|
if (!Hself && !Hpoint) {
|
||||||
return Unit3(local);
|
return Unit3(local);
|
||||||
} else {
|
} else {
|
||||||
Matrix23 D_b_local;
|
Matrix23 D_b_local;
|
||||||
Unit3 b = Unit3::FromPoint3(local, D_b_local);
|
Unit3 b = Unit3::FromPoint3(local, D_b_local);
|
||||||
if (H1) *H1 = D_b_local * D_local_pose;
|
if (Hself) *Hself = D_b_local * D_local_pose;
|
||||||
if (H2) *H2 = D_b_local * D_local_point;
|
if (Hpoint) *Hpoint = D_b_local * D_local_point;
|
||||||
return b;
|
return b;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> H1,
|
Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself,
|
||||||
OptionalJacobian<2, 6> H2) const {
|
OptionalJacobian<2, 6> Hpose) const {
|
||||||
if (H2) {
|
if (Hpose) {
|
||||||
H2->setZero();
|
Hpose->setZero();
|
||||||
return bearing(pose.translation(), H1, H2.cols<3>(3));
|
return bearing(pose.translation(), Hself, Hpose.cols<3>(3));
|
||||||
}
|
}
|
||||||
return bearing(pose.translation(), H1, boost::none);
|
return bearing(pose.translation(), Hself, boost::none);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
|
|
|
@ -75,8 +75,8 @@ public:
|
||||||
|
|
||||||
/// Named constructor with derivatives
|
/// Named constructor with derivatives
|
||||||
static Pose3 Create(const Rot3& R, const Point3& t,
|
static Pose3 Create(const Rot3& R, const Point3& t,
|
||||||
OptionalJacobian<6, 3> H1 = boost::none,
|
OptionalJacobian<6, 3> HR = boost::none,
|
||||||
OptionalJacobian<6, 3> H2 = boost::none);
|
OptionalJacobian<6, 3> Ht = boost::none);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create Pose3 by aligning two point pairs
|
* Create Pose3 by aligning two point pairs
|
||||||
|
@ -117,10 +117,10 @@ public:
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
/// Exponential map at identity - create a pose from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$
|
/// Exponential map at identity - create a pose from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$
|
||||||
static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> H = boost::none);
|
static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi = boost::none);
|
||||||
|
|
||||||
/// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this pose
|
/// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this pose
|
||||||
static Vector6 Logmap(const Pose3& p, OptionalJacobian<6, 6> H = boost::none);
|
static Vector6 Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose = boost::none);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate Adjoint map, transforming a twist in this pose's (i.e., body) frame to the world spatial frame
|
* Calculate Adjoint map, transforming a twist in this pose's (i.e., body) frame to the world spatial frame
|
||||||
|
@ -157,7 +157,7 @@ public:
|
||||||
* Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
|
* Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
|
||||||
*/
|
*/
|
||||||
static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
|
static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
|
||||||
OptionalJacobian<6, 6> = boost::none);
|
OptionalJacobian<6, 6> Hxi = boost::none);
|
||||||
|
|
||||||
// temporary fix for wrappers until case issue is resolved
|
// temporary fix for wrappers until case issue is resolved
|
||||||
static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}
|
static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}
|
||||||
|
@ -167,7 +167,7 @@ public:
|
||||||
* The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
|
* The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
|
||||||
*/
|
*/
|
||||||
static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
|
static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
|
||||||
OptionalJacobian<6, 6> H = boost::none);
|
OptionalJacobian<6, 6> Hxi = boost::none);
|
||||||
|
|
||||||
/// Derivative of Expmap
|
/// Derivative of Expmap
|
||||||
static Matrix6 ExpmapDerivative(const Vector6& xi);
|
static Matrix6 ExpmapDerivative(const Vector6& xi);
|
||||||
|
@ -177,8 +177,8 @@ public:
|
||||||
|
|
||||||
// Chart at origin, depends on compile-time flag GTSAM_POSE3_EXPMAP
|
// Chart at origin, depends on compile-time flag GTSAM_POSE3_EXPMAP
|
||||||
struct ChartAtOrigin {
|
struct ChartAtOrigin {
|
||||||
static Pose3 Retract(const Vector6& v, ChartJacobian H = boost::none);
|
static Pose3 Retract(const Vector6& xi, ChartJacobian Hxi = boost::none);
|
||||||
static Vector6 Local(const Pose3& r, ChartJacobian H = boost::none);
|
static Vector6 Local(const Pose3& pose, ChartJacobian Hpose = boost::none);
|
||||||
};
|
};
|
||||||
|
|
||||||
using LieGroup<Pose3, 6>::inverse; // version with derivative
|
using LieGroup<Pose3, 6>::inverse; // version with derivative
|
||||||
|
@ -201,38 +201,38 @@ public:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief takes a point in Pose coordinates and transforms it to world coordinates
|
* @brief takes a point in Pose coordinates and transforms it to world coordinates
|
||||||
* @param p point in Pose coordinates
|
* @param point point in Pose coordinates
|
||||||
* @param Dpose optional 3*6 Jacobian wrpt this pose
|
* @param Hself optional 3*6 Jacobian wrpt this pose
|
||||||
* @param Dpoint optional 3*3 Jacobian wrpt point
|
* @param Hpoint optional 3*3 Jacobian wrpt point
|
||||||
* @return point in world coordinates
|
* @return point in world coordinates
|
||||||
*/
|
*/
|
||||||
Point3 transformFrom(const Point3& p, OptionalJacobian<3, 6> Dpose =
|
Point3 transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself =
|
||||||
boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
|
boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;
|
||||||
|
|
||||||
/** syntactic sugar for transformFrom */
|
/** syntactic sugar for transformFrom */
|
||||||
inline Point3 operator*(const Point3& p) const {
|
inline Point3 operator*(const Point3& point) const {
|
||||||
return transformFrom(p);
|
return transformFrom(point);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @brief takes a point in world coordinates and transforms it to Pose coordinates
|
* @brief takes a point in world coordinates and transforms it to Pose coordinates
|
||||||
* @param p point in world coordinates
|
* @param point point in world coordinates
|
||||||
* @param Dpose optional 3*6 Jacobian wrpt this pose
|
* @param Hself optional 3*6 Jacobian wrpt this pose
|
||||||
* @param Dpoint optional 3*3 Jacobian wrpt point
|
* @param Hpoint optional 3*3 Jacobian wrpt point
|
||||||
* @return point in Pose coordinates
|
* @return point in Pose coordinates
|
||||||
*/
|
*/
|
||||||
Point3 transformTo(const Point3& p, OptionalJacobian<3, 6> Dpose =
|
Point3 transformTo(const Point3& point, OptionalJacobian<3, 6> Hself =
|
||||||
boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
|
boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
/// @name Standard Interface
|
/// @name Standard Interface
|
||||||
/// @{
|
/// @{
|
||||||
|
|
||||||
/// get rotation
|
/// get rotation
|
||||||
const Rot3& rotation(OptionalJacobian<3, 6> H = boost::none) const;
|
const Rot3& rotation(OptionalJacobian<3, 6> Hself = boost::none) const;
|
||||||
|
|
||||||
/// get translation
|
/// get translation
|
||||||
const Point3& translation(OptionalJacobian<3, 6> H = boost::none) const;
|
const Point3& translation(OptionalJacobian<3, 6> Hself = boost::none) const;
|
||||||
|
|
||||||
/// get x
|
/// get x
|
||||||
double x() const {
|
double x() const {
|
||||||
|
@ -252,36 +252,44 @@ public:
|
||||||
/** convert to 4*4 matrix */
|
/** convert to 4*4 matrix */
|
||||||
Matrix4 matrix() const;
|
Matrix4 matrix() const;
|
||||||
|
|
||||||
/** receives a pose in local coordinates and transforms it to world coordinates */
|
/**
|
||||||
Pose3 transformPoseFrom(const Pose3& pose) const;
|
* Assuming self == wTa, takes a pose aTb in local coordinates
|
||||||
|
* and transforms it to world coordinates wTb = wTa * aTb.
|
||||||
|
* This is identical to compose.
|
||||||
|
*/
|
||||||
|
Pose3 transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself = boost::none,
|
||||||
|
OptionalJacobian<6, 6> HaTb = boost::none) const;
|
||||||
|
|
||||||
/** receives a pose in world coordinates and transforms it to local coordinates */
|
/**
|
||||||
Pose3 transformPoseTo(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none,
|
* Assuming self == wTa, takes a pose wTb in world coordinates
|
||||||
OptionalJacobian<6, 6> H2 = boost::none) const;
|
* and transforms it to local coordinates aTb = inv(wTa) * wTb
|
||||||
|
*/
|
||||||
|
Pose3 transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself = boost::none,
|
||||||
|
OptionalJacobian<6, 6> HwTb = boost::none) const;
|
||||||
|
|
||||||
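A usage sketch for the two pose transforms declared above (hypothetical frames w, a, b):

// transformPoseFrom composes; transformPoseTo undoes the composition.
Pose3 wTa(Rot3::Ry(0.1), Point3(1, 0, 0));
Pose3 aTb(Rot3::Rz(0.2), Point3(0, 1, 0));
Pose3 wTb = wTa.transformPoseFrom(aTb);      // == wTa * aTb, identical to compose
Pose3 aTb_again = wTa.transformPoseTo(wTb);  // == wTa.inverse() * wTb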
/**
|
/**
|
||||||
* Calculate range to a landmark
|
* Calculate range to a landmark
|
||||||
* @param point 3D location of landmark
|
* @param point 3D location of landmark
|
||||||
* @return range (double)
|
* @return range (double)
|
||||||
*/
|
*/
|
||||||
double range(const Point3& point, OptionalJacobian<1, 6> H1 = boost::none,
|
double range(const Point3& point, OptionalJacobian<1, 6> Hself = boost::none,
|
||||||
OptionalJacobian<1, 3> H2 = boost::none) const;
|
OptionalJacobian<1, 3> Hpoint = boost::none) const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate range to another pose
|
* Calculate range to another pose
|
||||||
* @param pose Other SE(3) pose
|
* @param pose Other SE(3) pose
|
||||||
* @return range (double)
|
* @return range (double)
|
||||||
*/
|
*/
|
||||||
double range(const Pose3& pose, OptionalJacobian<1, 6> H1 = boost::none,
|
double range(const Pose3& pose, OptionalJacobian<1, 6> Hself = boost::none,
|
||||||
OptionalJacobian<1, 6> H2 = boost::none) const;
|
OptionalJacobian<1, 6> Hpose = boost::none) const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate bearing to a landmark
|
* Calculate bearing to a landmark
|
||||||
* @param point 3D location of landmark
|
* @param point 3D location of landmark
|
||||||
* @return bearing (Unit3)
|
* @return bearing (Unit3)
|
||||||
*/
|
*/
|
||||||
Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> H1 = boost::none,
|
Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> Hself = boost::none,
|
||||||
OptionalJacobian<2, 3> H2 = boost::none) const;
|
OptionalJacobian<2, 3> Hpoint = boost::none) const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate bearing to another pose
|
* Calculate bearing to another pose
|
||||||
|
@ -289,8 +297,8 @@ public:
|
||||||
* information is ignored.
|
* information is ignored.
|
||||||
* @return bearing (Unit3)
|
* @return bearing (Unit3)
|
||||||
*/
|
*/
|
||||||
Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> H1 = boost::none,
|
Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself = boost::none,
|
||||||
OptionalJacobian<2, 6> H2 = boost::none) const;
|
OptionalJacobian<2, 6> Hpose = boost::none) const;
|
||||||
|
|
||||||
/// @}
|
/// @}
|
||||||
/// @name Advanced Interface
|
/// @name Advanced Interface
|
||||||
|
@ -321,20 +329,20 @@ public:
|
||||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
|
||||||
/// @name Deprecated
|
/// @name Deprecated
|
||||||
/// @{
|
/// @{
|
||||||
Point3 transform_from(const Point3& p,
|
Point3 transform_from(const Point3& point,
|
||||||
OptionalJacobian<3, 6> Dpose = boost::none,
|
OptionalJacobian<3, 6> Hself = boost::none,
|
||||||
OptionalJacobian<3, 3> Dpoint = boost::none) const {
|
OptionalJacobian<3, 3> Hpoint = boost::none) const {
|
||||||
return transformFrom(p, Dpose, Dpoint);
|
return transformFrom(point, Hself, Hpoint);
|
||||||
}
|
}
|
||||||
Point3 transform_to(const Point3& p,
|
Point3 transform_to(const Point3& point,
|
||||||
OptionalJacobian<3, 6> Dpose = boost::none,
|
OptionalJacobian<3, 6> Hself = boost::none,
|
||||||
OptionalJacobian<3, 3> Dpoint = boost::none) const {
|
OptionalJacobian<3, 3> Hpoint = boost::none) const {
|
||||||
return transformTo(p, Dpose, Dpoint);
|
return transformTo(point, Hself, Hpoint);
|
||||||
}
|
}
|
||||||
Pose3 transform_pose_to(const Pose3& pose,
|
Pose3 transform_pose_to(const Pose3& pose,
|
||||||
OptionalJacobian<6, 6> H1 = boost::none,
|
OptionalJacobian<6, 6> Hself = boost::none,
|
||||||
OptionalJacobian<6, 6> H2 = boost::none) const {
|
OptionalJacobian<6, 6> Hpose = boost::none) const {
|
||||||
return transformPoseTo(pose, H1, H2);
|
return transformPoseTo(pose, Hself, Hpose);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* @deprecated: this function is neither here nor there. */
|
* @deprecated: this function is neither here nor there. */
|
||||||
|
@ -355,7 +363,7 @@ public:
|
||||||
#ifdef GTSAM_USE_QUATERNIONS
|
#ifdef GTSAM_USE_QUATERNIONS
|
||||||
// Align if we are using Quaternions
|
// Align if we are using Quaternions
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
// Pose3 class
|
// Pose3 class
|
||||||
|
|
|
@ -544,7 +544,7 @@ namespace gtsam {
|
||||||
#ifdef GTSAM_USE_QUATERNIONS
|
#ifdef GTSAM_USE_QUATERNIONS
|
||||||
// only align if quaternion, Matrix3 has no alignment requirements
|
// only align if quaternion, Matrix3 has no alignment requirements
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -20,8 +20,8 @@
|
||||||
|
|
||||||
#include <gtsam/base/Lie.h>
|
#include <gtsam/base/Lie.h>
|
||||||
#include <gtsam/base/Manifold.h>
|
#include <gtsam/base/Manifold.h>
|
||||||
|
#include <gtsam/base/make_shared.h>
|
||||||
#include <gtsam/dllexport.h>
|
#include <gtsam/dllexport.h>
|
||||||
|
|
||||||
#include <Eigen/Core>
|
#include <Eigen/Core>
|
||||||
|
|
||||||
#include <iostream> // TODO(frank): how to avoid?
|
#include <iostream> // TODO(frank): how to avoid?
|
||||||
|
@ -54,7 +54,7 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
|
||||||
using VectorN2 = Eigen::Matrix<double, internal::NSquaredSO(N), 1>;
|
using VectorN2 = Eigen::Matrix<double, internal::NSquaredSO(N), 1>;
|
||||||
using MatrixDD = Eigen::Matrix<double, dimension, dimension>;
|
using MatrixDD = Eigen::Matrix<double, dimension, dimension>;
|
||||||
|
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
MatrixNN matrix_; ///< Rotation matrix
|
MatrixNN matrix_; ///< Rotation matrix
|
||||||
|
@ -292,6 +292,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
|
||||||
boost::none) const;
|
boost::none) const;
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
|
template <class Archive>
|
||||||
|
friend void save(Archive&, SO&, const unsigned int);
|
||||||
|
template <class Archive>
|
||||||
|
friend void load(Archive&, SO&, const unsigned int);
|
||||||
template <class Archive>
|
template <class Archive>
|
||||||
friend void serialize(Archive&, SO&, const unsigned int);
|
friend void serialize(Archive&, SO&, const unsigned int);
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
|
@ -329,6 +333,16 @@ template <>
|
||||||
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
|
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
|
||||||
DynamicJacobian H2) const;
|
DynamicJacobian H2) const;
|
||||||
|
|
||||||
|
/** Serialization function */
|
||||||
|
template<class Archive>
|
||||||
|
void serialize(
|
||||||
|
Archive& ar, SOn& Q,
|
||||||
|
const unsigned int file_version
|
||||||
|
) {
|
||||||
|
Matrix& M = Q.matrix_;
|
||||||
|
ar& BOOST_SERIALIZATION_NVP(M);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Define the traits. internal::LieGroup provides both Lie group and Testable
|
* Define the traits. internal::LieGroup provides both Lie group and Testable
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -90,6 +90,8 @@ public:
|
||||||
/// Copy assignment
|
/// Copy assignment
|
||||||
Unit3& operator=(const Unit3 & u) {
|
Unit3& operator=(const Unit3 & u) {
|
||||||
p_ = u.p_;
|
p_ = u.p_;
|
||||||
|
B_ = u.B_;
|
||||||
|
H_B_ = u.H_B_;
|
||||||
return *this;
|
return *this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -214,7 +216,7 @@ private:
|
||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||||
};
|
};
|
||||||
|
|
||||||
// Define GTSAM traits
|
// Define GTSAM traits
|
||||||
|
|
|
@ -10,17 +10,18 @@
|
||||||
* -------------------------------------------------------------------------- */
|
* -------------------------------------------------------------------------- */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @file testCal3Fisheye.cpp
|
* @file testCal3Fisheye.cpp
|
||||||
* @brief Unit tests for fisheye calibration class
|
* @brief Unit tests for fisheye calibration class
|
||||||
* @author ghaggin
|
* @author ghaggin
|
||||||
*/
|
*/
|
||||||
|
-#include <CppUnitLite/TestHarness.h>
 #include <gtsam/base/Testable.h>
 #include <gtsam/base/numericalDerivative.h>
 #include <gtsam/geometry/Cal3Fisheye.h>
 #include <gtsam/geometry/Point3.h>

+#include <CppUnitLite/TestHarness.h>

 using namespace gtsam;

 GTSAM_CONCEPT_TESTABLE_INST(Cal3Fisheye)

@@ -30,12 +31,27 @@ static const double fx = 250, fy = 260, s = 0.1, u0 = 320, v0 = 240;
 static Cal3Fisheye K(fx, fy, s, u0, v0, -0.013721808247486035,
                      0.020727425669427896, -0.012786476702685545,
                      0.0025242267320687625);
-static Point2 p(2, 3);
+static Point2 kTestPoint2(2, 3);
+
+/* ************************************************************************* */
+TEST(Cal3Fisheye, assert_equal) { CHECK(assert_equal(K, K, 1e-5)); }
+
+/* ************************************************************************* */
+TEST(Cal3Fisheye, retract) {
+  Cal3Fisheye expected(K.fx() + 1, K.fy() + 2, K.skew() + 3, K.px() + 4,
+                       K.py() + 5, K.k1() + 6, K.k2() + 7, K.k3() + 8,
+                       K.k4() + 9);
+  Vector d(9);
+  d << 1, 2, 3, 4, 5, 6, 7, 8, 9;
+  Cal3Fisheye actual = K.retract(d);
+  CHECK(assert_equal(expected, actual, 1e-7));
+  CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7));
+}

 /* ************************************************************************* */
 TEST(Cal3Fisheye, uncalibrate1) {
   // Calculate the solution
-  const double xi = p.x(), yi = p.y();
+  const double xi = kTestPoint2.x(), yi = kTestPoint2.y();
   const double r = sqrt(xi * xi + yi * yi);
   const double t = atan(r);
   const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4;

@@ -46,32 +62,42 @@ TEST(Cal3Fisheye, uncalibrate1) {

   Point2 uv_sol(v[0] / v[2], v[1] / v[2]);

-  Point2 uv = K.uncalibrate(p);
+  Point2 uv = K.uncalibrate(kTestPoint2);
   CHECK(assert_equal(uv, uv_sol));
 }

 /* ************************************************************************* */
-/**
- * Check that a point at (0,0) projects to the
- * image center.
- */
-TEST(Cal3Fisheye, uncalibrate2) {
-  Point2 pz(0, 0);
-  auto uv = K.uncalibrate(pz);
-  CHECK(assert_equal(uv, Point2(u0, v0)));
+// For numerical derivatives
+Point2 f(const Cal3Fisheye& k, const Point2& pt) { return k.uncalibrate(pt); }
+
+/* ************************************************************************* */
+TEST(Cal3Fisheye, Derivatives) {
+  Matrix H1, H2;
+  K.uncalibrate(kTestPoint2, H1, H2);
+  CHECK(assert_equal(numericalDerivative21(f, K, kTestPoint2, 1e-7), H1, 1e-5));
+  CHECK(assert_equal(numericalDerivative22(f, K, kTestPoint2, 1e-7), H2, 1e-5));
 }

 /* ************************************************************************* */
-/**
- * This test uses cv2::fisheye::projectPoints to test that uncalibrate
- * properly projects a point into the image plane. One notable difference
- * between opencv and the Cal3Fisheye::uncalibrate function is the skew
- * parameter. The equivalence is alpha = s/fx.
- *
- *
- * Python script to project points with fisheye model in OpenCv
- * (script run with OpenCv version 4.2.0 and Numpy version 1.18.2)
- */
+// Check that a point at (0,0) projects to the image center.
+TEST(Cal3Fisheye, uncalibrate2) {
+  Point2 pz(0, 0);
+  Matrix H1, H2;
+  auto uv = K.uncalibrate(pz, H1, H2);
+  CHECK(assert_equal(uv, Point2(u0, v0)));
+  CHECK(assert_equal(numericalDerivative21(f, K, pz, 1e-7), H1, 1e-5));
+  // TODO(frank): the second jacobian is all NaN for the image center!
+  // CHECK(assert_equal(numericalDerivative22(f, K, pz, 1e-7), H2, 1e-5));
+}
+
+/* ************************************************************************* */
+// This test uses cv2::fisheye::projectPoints to test that uncalibrate
+// properly projects a point into the image plane. One notable difference
+// between opencv and the Cal3Fisheye::uncalibrate function is the skew
+// parameter. The equivalence is alpha = s/fx.
+//
+// Python script to project points with fisheye model in OpenCv
+// (script run with OpenCv version 4.2.0 and Numpy version 1.18.2)
 // clang-format off
 /*
 ===========================================================

@@ -94,6 +120,7 @@ tvec = np.float64([[0.,0.,0.]]);
 imagePoints, jacobian = cv2.fisheye.projectPoints(objpts, rvec, tvec, cameraMatrix, distCoeffs, alpha=alpha)
 np.set_printoptions(precision=14)
 print(imagePoints)

 ===========================================================
 * Script output: [[[457.82638130304935 408.18905848512986]]]
 */

@@ -134,21 +161,18 @@ TEST(Cal3Fisheye, calibrate1) {
 }

 /* ************************************************************************* */
-/**
- * Check that calibrate returns (0,0) for the image center
- */
+// Check that calibrate returns (0,0) for the image center
 TEST(Cal3Fisheye, calibrate2) {
   Point2 uv(u0, v0);
   auto xi_hat = K.calibrate(uv);
   CHECK(assert_equal(xi_hat, Point2(0, 0)))
 }

-/**
- * Run calibrate on OpenCv test from uncalibrate3
- * (script shown above)
- * 3d point: (23, 27, 31)
- * 2d point in image plane: (457.82638130304935, 408.18905848512986)
- */
+/* ************************************************************************* */
+// Run calibrate on OpenCv test from uncalibrate3
+// (script shown above)
+// 3d point: (23, 27, 31)
+// 2d point in image plane: (457.82638130304935, 408.18905848512986)
 TEST(Cal3Fisheye, calibrate3) {
   Point3 p3(23, 27, 31);
   Point2 xi(p3.x() / p3.z(), p3.y() / p3.z());

@@ -157,47 +181,6 @@ TEST(Cal3Fisheye, calibrate3) {
   CHECK(assert_equal(xi_hat, xi));
 }

-/* ************************************************************************* */
-// For numerical derivatives
-Point2 uncalibrate_(const Cal3Fisheye& k, const Point2& pt) {
-  return k.uncalibrate(pt);
-}
-
-/* ************************************************************************* */
-TEST(Cal3Fisheye, Duncalibrate1) {
-  Matrix computed;
-  K.uncalibrate(p, computed, boost::none);
-  Matrix numerical = numericalDerivative21(uncalibrate_, K, p, 1e-7);
-  CHECK(assert_equal(numerical, computed, 1e-5));
-  Matrix separate = K.D2d_calibration(p);
-  CHECK(assert_equal(numerical, separate, 1e-5));
-}
-
-/* ************************************************************************* */
-TEST(Cal3Fisheye, Duncalibrate2) {
-  Matrix computed;
-  K.uncalibrate(p, boost::none, computed);
-  Matrix numerical = numericalDerivative22(uncalibrate_, K, p, 1e-7);
-  CHECK(assert_equal(numerical, computed, 1e-5));
-  Matrix separate = K.D2d_intrinsic(p);
-  CHECK(assert_equal(numerical, separate, 1e-5));
-}
-
-/* ************************************************************************* */
-TEST(Cal3Fisheye, assert_equal) { CHECK(assert_equal(K, K, 1e-5)); }
-
-/* ************************************************************************* */
-TEST(Cal3Fisheye, retract) {
-  Cal3Fisheye expected(K.fx() + 1, K.fy() + 2, K.skew() + 3, K.px() + 4,
-                       K.py() + 5, K.k1() + 6, K.k2() + 7, K.k3() + 8,
-                       K.k4() + 9);
-  Vector d(9);
-  d << 1, 2, 3, 4, 5, 6, 7, 8, 9;
-  Cal3Fisheye actual = K.retract(d);
-  CHECK(assert_equal(expected, actual, 1e-7));
-  CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7));
-}
-
 /* ************************************************************************* */
 int main() {
   TestResult tr;
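The uncalibrate tests above recompute Cal3Fisheye's projection by hand. As a reference, here is a minimal standalone sketch of the equidistant fisheye model they exercise, assuming the usual Kannala-Brandt form theta_d = theta * (1 + k1*theta^2 + k2*theta^4 + k3*theta^6 + k4*theta^8) and the pixel mapping u = fx*xd + s*yd + u0, v = fy*yd + v0 (so OpenCV's alpha corresponds to s/fx, as the comment notes). The struct and function names below are illustrative, not GTSAM API.

// Standalone sketch (not GTSAM code): equidistant fisheye projection of a
// normalized image point (x, y) into pixel coordinates, mirroring uncalibrate1.
#include <cmath>
#include <iostream>

struct Pixel { double u, v; };

Pixel projectFisheye(double x, double y,              // normalized point
                     double fx, double fy, double s,  // focal lengths, skew
                     double u0, double v0,            // principal point
                     double k1, double k2, double k3, double k4) {
  const double r = std::sqrt(x * x + y * y);
  const double theta = std::atan(r);  // angle from the optical axis
  const double t2 = theta * theta;
  const double thetad =               // distorted angle
      theta * (1 + k1 * t2 + k2 * t2 * t2 + k3 * t2 * t2 * t2 +
               k4 * t2 * t2 * t2 * t2);
  const double scale = (r > 1e-12) ? thetad / r : 1.0;  // avoid 0/0 at the center
  const double xd = scale * x, yd = scale * y;
  return {fx * xd + s * yd + u0, fy * yd + v0};
}

int main() {
  // Same calibration constants and test point as the fixture K and kTestPoint2 above.
  const Pixel p = projectFisheye(2, 3, 250, 260, 0.1, 320, 240,
                                 -0.013721808247486035, 0.020727425669427896,
                                 -0.012786476702685545, 0.0025242267320687625);
  std::cout << p.u << " " << p.v << "\n";
  return 0;
}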
@@ -418,6 +418,29 @@ TEST(Pose3, transform_to_rotate) {
   EXPECT(assert_equal(expected, actual, 0.001));
 }

+/* ************************************************************************* */
+// Check transformPoseFrom and its pushforward
+Pose3 transformPoseFrom_(const Pose3& wTa, const Pose3& aTb) {
+  return wTa.transformPoseFrom(aTb);
+}
+
+TEST(Pose3, transformPoseFrom)
+{
+  Matrix actual = (T2*T2).matrix();
+  Matrix expected = T2.matrix()*T2.matrix();
+  EXPECT(assert_equal(actual, expected, 1e-8));
+
+  Matrix H1, H2;
+  T2.transformPoseFrom(T2, H1, H2);
+
+  Matrix numericalH1 = numericalDerivative21(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH1, H1, 5e-3));
+  EXPECT(assert_equal(T2.inverse().AdjointMap(), H1, 5e-3));
+
+  Matrix numericalH2 = numericalDerivative22(transformPoseFrom_, T2, T2);
+  EXPECT(assert_equal(numericalH2, H2, 1e-4));
+}
+
 /* ************************************************************************* */
 TEST(Pose3, transformTo) {
   Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(2, 4, 0));
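Why the new test compares H1 against T2.inverse().AdjointMap(): with GTSAM's right (local) perturbations, the Jacobians of pose composition follow, as a sketch, from

\[
  T_a \exp(\hat{\xi}_a)\, T_b
  \;=\; T_a T_b \exp\!\bigl(\widehat{\mathrm{Ad}_{T_b^{-1}}\,\xi_a}\bigr)
  \quad\Longrightarrow\quad
  H_1 = \frac{\partial (T_a T_b)}{\partial T_a} = \mathrm{Ad}_{T_b^{-1}},
  \qquad
  H_2 = \frac{\partial (T_a T_b)}{\partial T_b} = I_{6\times 6}.
\]

With aTb = T2 in the test, H1 should equal Ad(T2^{-1}) = T2.inverse().AdjointMap() and H2 the 6x6 identity, which the numerical derivatives confirm.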
@@ -484,6 +484,15 @@ TEST(Unit3, ErrorBetweenFactor) {
   }
 }

+TEST(Unit3, CopyAssign) {
+  Unit3 p{1, 0.2, 0.3};
+
+  EXPECT(p.error(p).isZero());
+
+  p = Unit3{-1, 2, 8};
+  EXPECT(p.error(p).isZero());
+}
+
 /* ************************************************************************* */
 TEST(actualH, Serialization) {
   Unit3 p(0, 1, 0);
@@ -215,7 +215,7 @@ struct CameraProjectionMatrix {
  private:
   const Matrix3 K_;
  public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  GTSAM_MAKE_ALIGNED_OPERATOR_NEW
 };

 /**
@@ -69,4 +69,6 @@ namespace gtsam {
     void saveGraph(const std::string &s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
   };

 }
+
+#include <gtsam/inference/BayesNet-inst.h>
@@ -136,57 +136,61 @@ namespace gtsam {
     }
   }

-  /* ************************************************************************* */
+  /* *********************************************************************** */
   // separator marginal, uses separator marginal of parent recursively
   // P(C) = P(F|S) P(S)
-  /* ************************************************************************* */
-  template<class DERIVED, class FACTORGRAPH>
+  /* *********************************************************************** */
+  template <class DERIVED, class FACTORGRAPH>
   typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(Eliminate function) const
-  {
+  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::separatorMarginal(
+      Eliminate function) const {
     gttic(BayesTreeCliqueBase_separatorMarginal);
     // Check if the Separator marginal was already calculated
-    if (!cachedSeparatorMarginal_)
-    {
+    if (!cachedSeparatorMarginal_) {
       gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

       // If this is the root, there is no separator
-      if (parent_.expired() /*(if we're the root)*/)
-      {
+      if (parent_.expired() /*(if we're the root)*/) {
         // we are root, return empty
         FactorGraphType empty;
         cachedSeparatorMarginal_ = empty;
-      }
-      else
-      {
+      } else {
+        // Flatten recursion in timing outline
+        gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss);
+        gttoc(BayesTreeCliqueBase_separatorMarginal);

         // Obtain P(S) = \int P(Cp) = \int P(Fp|Sp) P(Sp)
         // initialize P(Cp) with the parent separator marginal
         derived_ptr parent(parent_.lock());
-        gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss); // Flatten recursion in timing outline
-        gttoc(BayesTreeCliqueBase_separatorMarginal);
-        FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp)
+        FactorGraphType p_Cp(parent->separatorMarginal(function));  // P(Sp)
         gttic(BayesTreeCliqueBase_separatorMarginal);
         gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss);

         // now add the parent conditional
         p_Cp += parent->conditional_;  // P(Fp|Sp)

         // The variables we want to keepSet are exactly the ones in S
-        KeyVector indicesS(this->conditional()->beginParents(), this->conditional()->endParents());
-        cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+        KeyVector indicesS(this->conditional()->beginParents(),
+                           this->conditional()->endParents());
+        auto separatorMarginal =
+            p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
+        cachedSeparatorMarginal_.reset(*separatorMarginal);
       }
     }

     // return the shortcut P(S||B)
     return *cachedSeparatorMarginal_;  // return the cached version
   }

-  /* ************************************************************************* */
-  // marginal2, uses separator marginal of parent recursively
+  /* *********************************************************************** */
+  // marginal2, uses separator marginal of parent
   // P(C) = P(F|S) P(S)
-  /* ************************************************************************* */
-  template<class DERIVED, class FACTORGRAPH>
+  /* *********************************************************************** */
+  template <class DERIVED, class FACTORGRAPH>
   typename BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::FactorGraphType
-  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(Eliminate function) const
-  {
+  BayesTreeCliqueBase<DERIVED, FACTORGRAPH>::marginal2(
+      Eliminate function) const {
     gttic(BayesTreeCliqueBase_marginal2);
     // initialize with separator marginal P(S)
     FactorGraphType p_C = this->separatorMarginal(function);
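As a sketch of the recursion the comments describe (F the frontal variables, S the separator, subscript p for the parent clique):

\[
  P(C) = P(F \mid S)\,P(S),
  \qquad
  P(S) = \sum_{C_p \setminus S} P(C_p)
       = \sum_{C_p \setminus S} P(F_p \mid S_p)\,P(S_p),
\]

so a clique's separator marginal is the parent's clique marginal summed down onto S. That is what the marginalMultifrontalBayesNet call over the parent keys in indicesS computes, and the result is cached in cachedSeparatorMarginal_ so the recursion runs at most once per clique.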
@@ -65,6 +65,8 @@ namespace gtsam {
     Conditional(size_t nrFrontals) : nrFrontals_(nrFrontals) {}

     /// @}

+   public:
     /// @name Testable
     /// @{

@@ -76,7 +78,6 @@ namespace gtsam {

     /// @}

-   public:
     /// @name Standard Interface
     /// @{

@@ -505,7 +505,7 @@ Vector JacobianFactor::error_vector(const VectorValues& c) const {
 double JacobianFactor::error(const VectorValues& c) const {
   Vector e = unweighted_error(c);
   // Use the noise model distance function to get the correct error if available.
-  if (model_) return 0.5 * model_->distance(e);
+  if (model_) return 0.5 * model_->squaredMahalanobisDistance(e);
   return 0.5 * e.dot(e);
 }

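For reference, the quantity involved in this rename is the Gaussian factor error; as a sketch, with noise covariance \(\Sigma\):

\[
  E(x) = \tfrac{1}{2}\,\lVert Ax - b\rVert_{\Sigma}^{2}
       = \tfrac{1}{2}\,(Ax - b)^{\top}\Sigma^{-1}(Ax - b),
\]

i.e. half the squared Mahalanobis distance of the unweighted error \(e = Ax - b\), which is what the renamed squaredMahalanobisDistance call supplies; without a noise model the code falls back to the unit-covariance case 0.5 * e.dot(e).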
@@ -137,12 +137,12 @@ Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
   }
 }

-double Fair::weight(double error) const {
-  return 1.0 / (1.0 + std::abs(error) / c_);
+double Fair::weight(double distance) const {
+  return 1.0 / (1.0 + std::abs(distance) / c_);
 }

-double Fair::residual(double error) const {
-  const double absError = std::abs(error);
+double Fair::loss(double distance) const {
+  const double absError = std::abs(distance);
   const double normalizedError = absError / c_;
   const double c_2 = c_ * c_;
   return c_2 * (normalizedError - std::log1p(normalizedError));

@@ -170,15 +170,15 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
   }
 }

-double Huber::weight(double error) const {
-  const double absError = std::abs(error);
+double Huber::weight(double distance) const {
+  const double absError = std::abs(distance);
   return (absError <= k_) ? (1.0) : (k_ / absError);
 }

-double Huber::residual(double error) const {
-  const double absError = std::abs(error);
+double Huber::loss(double distance) const {
+  const double absError = std::abs(distance);
   if (absError <= k_) {  // |x| <= k
-    return error*error / 2;
+    return distance*distance / 2;
   } else { // |x| > k
     return k_ * (absError - (k_/2));
   }

@@ -208,12 +208,12 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
   }
 }

-double Cauchy::weight(double error) const {
-  return ksquared_ / (ksquared_ + error*error);
+double Cauchy::weight(double distance) const {
+  return ksquared_ / (ksquared_ + distance*distance);
 }

-double Cauchy::residual(double error) const {
-  const double val = std::log1p(error * error / ksquared_);
+double Cauchy::loss(double distance) const {
+  const double val = std::log1p(distance * distance / ksquared_);
   return ksquared_ * val * 0.5;
 }

@@ -241,18 +241,18 @@ Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), c
   }
 }

-double Tukey::weight(double error) const {
-  if (std::abs(error) <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+double Tukey::weight(double distance) const {
+  if (std::abs(distance) <= c_) {
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     return one_minus_xc2 * one_minus_xc2;
   }
   return 0.0;
 }

-double Tukey::residual(double error) const {
-  double absError = std::abs(error);
+double Tukey::loss(double distance) const {
+  double absError = std::abs(distance);
   if (absError <= c_) {
-    const double one_minus_xc2 = 1.0 - error*error/csquared_;
+    const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
     const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2;
     return csquared_ * (1 - t) / 6.0;
   } else {

@@ -280,13 +280,13 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {

 Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}

-double Welsch::weight(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::weight(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return std::exp(-xc2);
 }

-double Welsch::residual(double error) const {
-  const double xc2 = (error*error)/csquared_;
+double Welsch::loss(double distance) const {
+  const double xc2 = (distance*distance)/csquared_;
   return csquared_ * 0.5 * -std::expm1(-xc2);
 }

@@ -311,16 +311,16 @@ GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }

-double GemanMcClure::weight(double error) const {
+double GemanMcClure::weight(double distance) const {
   const double c2 = c_*c_;
   const double c4 = c2*c2;
-  const double c2error = c2 + error*error;
+  const double c2error = c2 + distance*distance;
   return c4/(c2error*c2error);
 }

-double GemanMcClure::residual(double error) const {
+double GemanMcClure::loss(double distance) const {
   const double c2 = c_*c_;
-  const double error2 = error*error;
+  const double error2 = distance*distance;
   return 0.5 * (c2 * error2) / (c2 + error2);
 }

@@ -345,8 +345,8 @@ DCS::DCS(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }

-double DCS::weight(double error) const {
-  const double e2 = error*error;
+double DCS::weight(double distance) const {
+  const double e2 = distance*distance;
   if (e2 > c_)
   {
     const double w = 2.0*c_/(c_ + e2);

@@ -356,10 +356,10 @@ double DCS::weight(double error) const {
   return 1.0;
 }

-double DCS::residual(double error) const {
+double DCS::loss(double distance) const {
   // This is the simplified version of Eq 9 from (Agarwal13icra)
   // after you simplify and cancel terms.
-  const double e2 = error*error;
+  const double e2 = distance*distance;
   const double e4 = e2*e2;
   const double c2 = c_*c_;

@@ -391,17 +391,17 @@ L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
   }
 }

-double L2WithDeadZone::weight(double error) const {
+double L2WithDeadZone::weight(double distance) const {
   // note that this code is slightly uglier than residual, because there are three distinct
   // cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
   // cases (deadzone, non-deadzone) in residual.
-  if (std::abs(error) <= k_) return 0.0;
-  else if (error > k_) return (-k_+error)/error;
-  else return (k_+error)/error;
+  if (std::abs(distance) <= k_) return 0.0;
+  else if (distance > k_) return (-k_+distance)/distance;
+  else return (k_+distance)/distance;
 }

-double L2WithDeadZone::residual(double error) const {
-  const double abs_error = std::abs(error);
+double L2WithDeadZone::loss(double distance) const {
+  const double abs_error = std::abs(distance);
   return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
 }

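Across all of these estimators the renamed pair keeps the same relationship: weight(x) = loss'(x)/x. A minimal standalone check of that identity for the Huber definitions above (plain C++, not GTSAM code; the helper names are illustrative):

// Verify numerically that the Huber weight equals loss'(x)/x, using the exact
// formulas from the implementation above: loss(x) = x^2/2 for |x| <= k and
// k*(|x| - k/2) otherwise; weight(x) = 1 for |x| <= k and k/|x| otherwise.
#include <cassert>
#include <cmath>
#include <iostream>

static double huberLoss(double x, double k) {
  const double ax = std::abs(x);
  return (ax <= k) ? 0.5 * x * x : k * (ax - 0.5 * k);
}

static double huberWeight(double x, double k) {
  const double ax = std::abs(x);
  return (ax <= k) ? 1.0 : k / ax;
}

int main() {
  const double k = 1.345, h = 1e-6;
  for (double x : {0.3, 1.0, 2.5, -4.0}) {
    // central-difference derivative of the loss: phi(x) ~ (loss(x+h) - loss(x-h)) / (2h)
    const double phi = (huberLoss(x + h, k) - huberLoss(x - h, k)) / (2 * h);
    assert(std::abs(huberWeight(x, k) - phi / x) < 1e-5);
  }
  std::cout << "weight(x) == loss'(x)/x at the sampled points\n";
  return 0;
}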
@@ -36,12 +36,12 @@ namespace noiseModel {
  * The mEstimator name space contains all robust error functions.
  * It mirrors the exposition at
  * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
- * which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
+ * which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice.
  *
  * To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
  *
  * Name        Symbol          Least-Squares   L1-norm    Huber
- * Residual    \rho(x)         0.5*x^2         |x|        0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
+ * Loss        \rho(x)         0.5*x^2         |x|        0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
  * Derivative  \phi(x)         x               sgn(x)     x if |x|<k, k sgn(x) otherwise
  * Weight      w(x)=\phi(x)/x  1               1/|x|      1 if |x|<k, k/|x| otherwise
  *
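The table is the core of iteratively re-weighted least squares; as a sketch in the notation above:

\[
  \min_x \sum_i \rho\bigl(r_i(x)\bigr)
  \quad\Longrightarrow\quad
  \sum_i \phi(r_i)\,\frac{\partial r_i}{\partial x} = 0,
  \qquad \phi = \rho', \quad w(r) = \frac{\phi(r)}{r},
\]

and the stationarity condition is exactly the normal equation of the weighted problem \(\min_x \tfrac{1}{2}\sum_i w(r_i^{(k)})\, r_i(x)^2\) with the weights frozen at the current iterate, so each IRLS step evaluates w at the current residuals and re-solves a weighted least-squares problem.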
@@ -75,27 +75,31 @@ class GTSAM_EXPORT Base {
    * the quadratic function for an L2 penalty, the absolute value function for
    * an L1 penalty, etc.
    *
-   * TODO(mikebosse): When the residual function has as input the norm of the
-   * residual vector, then it prevents implementations of asymmeric loss
+   * TODO(mikebosse): When the loss function has as input the norm of the
+   * error vector, then it prevents implementations of asymmeric loss
    * functions. It would be better for this function to accept the vector and
    * internally call the norm if necessary.
    */
-  virtual double residual(double error) const { return 0; };
+  virtual double loss(double distance) const { return 0; };
+
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
+  virtual double residual(double distance) const { return loss(distance); };
+#endif

   /*
    * This method is responsible for returning the weight function for a given
    * amount of error. The weight function is related to the analytic derivative
-   * of the residual function. See
+   * of the loss function. See
    * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
    * for details. This method is required when optimizing cost functions with
    * robust penalties using iteratively re-weighted least squares.
    */
-  virtual double weight(double error) const = 0;
+  virtual double weight(double distance) const = 0;

   virtual void print(const std::string &s) const = 0;
   virtual bool equals(const Base &expected, double tol = 1e-8) const = 0;

-  double sqrtWeight(double error) const { return std::sqrt(weight(error)); }
+  double sqrtWeight(double distance) const { return std::sqrt(weight(distance)); }

   /** produce a weight vector according to an error vector and the implemented
    * robust function */
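In practice loss() and weight() are rarely called directly; a factor's Gaussian noise model is wrapped in a robust model that applies them during re-weighting. A minimal usage sketch, assuming the noiseModel::Robust and Isotropic factory names from the GTSAM 4.x headers (they are not shown in this diff):

// Wrap a Gaussian noise model with an m-estimator so the optimizer applies
// weight()/loss() through iteratively re-weighted least squares.
#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

int main() {
  // Base Gaussian model: 3-dimensional, sigma = 0.1 on every component.
  auto gaussian = noiseModel::Isotropic::Sigma(3, 0.1);
  // Huber m-estimator with the usual 95%-efficiency tuning constant k = 1.345.
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  // Robust model that re-weights the Gaussian using huber->weight(distance).
  auto robust = noiseModel::Robust::Create(huber, gaussian);
  robust->print("robust noise model");
  return 0;
}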
@@ -123,7 +127,7 @@ class GTSAM_EXPORT Base {
   }
 };

-/// Null class is not robust so is a Gaussian ?
+/// Null class should behave as Gaussian
 class GTSAM_EXPORT Null : public Base {
  public:
   typedef boost::shared_ptr<Null> shared_ptr;

@@ -131,7 +135,7 @@ class GTSAM_EXPORT Null : public Base {
   Null(const ReweightScheme reweight = Block) : Base(reweight) {}
   ~Null() {}
   double weight(double /*error*/) const { return 1.0; }
-  double residual(double error) const { return error; }
+  double loss(double distance) const { return 0.5 * distance * distance; }
   void print(const std::string &s) const;
   bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
   static shared_ptr Create();
@@ -154,8 +158,8 @@ class GTSAM_EXPORT Fair : public Base {
   typedef boost::shared_ptr<Fair> shared_ptr;

   Fair(double c = 1.3998, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double c, const ReweightScheme reweight = Block);

@@ -179,8 +183,8 @@ class GTSAM_EXPORT Huber : public Base {
   typedef boost::shared_ptr<Huber> shared_ptr;

   Huber(double k = 1.345, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -209,8 +213,8 @@ class GTSAM_EXPORT Cauchy : public Base {
   typedef boost::shared_ptr<Cauchy> shared_ptr;

   Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -234,8 +238,8 @@ class GTSAM_EXPORT Tukey : public Base {
   typedef boost::shared_ptr<Tukey> shared_ptr;

   Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -259,8 +263,8 @@ class GTSAM_EXPORT Welsch : public Base {
   typedef boost::shared_ptr<Welsch> shared_ptr;

   Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -295,8 +299,8 @@ class GTSAM_EXPORT GemanMcClure : public Base {

   GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
   ~GemanMcClure() {}
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -325,8 +329,8 @@ class GTSAM_EXPORT DCS : public Base {

   DCS(double c = 1.0, const ReweightScheme reweight = Block);
   ~DCS() {}
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);

@@ -358,8 +362,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
   typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;

   L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
-  double weight(double error) const override;
-  double residual(double error) const override;
+  double weight(double distance) const override;
+  double loss(double distance) const override;
   void print(const std::string &s) const override;
   bool equals(const Base &expected, double tol = 1e-8) const override;
   static shared_ptr Create(double k, const ReweightScheme reweight = Block);