Merge branch 'develop' of https://bitbucket.org/gtborg/gtsam into fix/trailing_whitespaces

# Conflicts:
#	gtsam/symbolic/tests/testVariableIndex.cpp
#	gtsam_unstable/gtsam_unstable.h
#	gtsam_unstable/linear/RawQP.cpp
#	gtsam_unstable/linear/RawQP.h

release/4.3a0
commit b2e5dadcdb
@@ -1,4 +1,5 @@
/build*
/debug*
.idea
*.pyc
*.DS_Store

CMakeLists.txt
@ -61,8 +61,8 @@ option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the defau
|
|||
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
|
||||
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
|
||||
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
|
||||
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
|
||||
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module" OFF)
|
||||
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
|
||||
|
@ -78,6 +78,7 @@ endif()
|
|||
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
|
||||
option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF)
|
||||
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON)
|
||||
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper or python module for (or Default)")
|
||||
|
||||
# Check / set dependent variables for MATLAB wrapper
|
||||
if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP)
|
||||
|
@ -121,15 +122,27 @@ if(MSVC)
|
|||
# If we use Boost shared libs, disable auto linking.
|
||||
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
|
||||
if(NOT Boost_USE_STATIC_LIBS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
|
||||
endif()
|
||||
# Virtual memory range for PCH exceeded on VS2015
|
||||
if(MSVC_VERSION LESS 1910) # older than VS2017
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS -Zm295)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
|
||||
# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
|
||||
# or explicit instantiation will generate build errors.
|
||||
# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
|
||||
#
|
||||
if(MSVC AND BUILD_SHARED_LIBS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
|
||||
# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
|
||||
set(BOOST_FIND_MINIMUM_VERSION 1.43)
|
||||
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
|
||||
|
||||
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
|
||||
|
||||
# Required components
|
||||
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
|
||||
|
@ -137,22 +150,43 @@ if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILE
|
|||
message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
|
||||
endif()
|
||||
|
||||
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
|
||||
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
|
||||
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
|
||||
|
||||
# JLBC: This was once updated to target-based names (Boost::xxx), but it caused
|
||||
# problems with Boost versions newer than FindBoost.cmake was prepared to handle,
|
||||
# so we downgraded this to classic filenames-based variables, and manually adding
|
||||
# the target_include_directories(xxx ${Boost_INCLUDE_DIR})
|
||||
set(GTSAM_BOOST_LIBRARIES
|
||||
Boost::serialization
|
||||
Boost::system
|
||||
Boost::filesystem
|
||||
Boost::thread
|
||||
Boost::date_time
|
||||
Boost::regex
|
||||
optimized
|
||||
${Boost_SERIALIZATION_LIBRARY_RELEASE}
|
||||
${Boost_SYSTEM_LIBRARY_RELEASE}
|
||||
${Boost_FILESYSTEM_LIBRARY_RELEASE}
|
||||
${Boost_THREAD_LIBRARY_RELEASE}
|
||||
${Boost_DATE_TIME_LIBRARY_RELEASE}
|
||||
${Boost_REGEX_LIBRARY_RELEASE}
|
||||
debug
|
||||
${Boost_SERIALIZATION_LIBRARY_DEBUG}
|
||||
${Boost_SYSTEM_LIBRARY_DEBUG}
|
||||
${Boost_FILESYSTEM_LIBRARY_DEBUG}
|
||||
${Boost_THREAD_LIBRARY_DEBUG}
|
||||
${Boost_DATE_TIME_LIBRARY_DEBUG}
|
||||
${Boost_REGEX_LIBRARY_DEBUG}
|
||||
)
|
||||
message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}")
|
||||
if (GTSAM_DISABLE_NEW_TIMERS)
|
||||
message("WARNING: GTSAM timing instrumentation manually disabled")
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS DGTSAM_DISABLE_NEW_TIMERS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
|
||||
else()
|
||||
if(Boost_TIMER_LIBRARY)
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES
|
||||
optimized
|
||||
${Boost_TIMER_LIBRARY_RELEASE}
|
||||
${Boost_CHRONO_LIBRARY_RELEASE}
|
||||
debug
|
||||
${Boost_TIMER_LIBRARY_DEBUG}
|
||||
${Boost_CHRONO_LIBRARY_DEBUG}
|
||||
)
|
||||
else()
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
|
||||
message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
|
||||
|
@ -162,7 +196,7 @@ endif()
|
|||
|
||||
if(NOT (${Boost_VERSION} LESS 105600))
|
||||
message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS BOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES BOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES BOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
|
@ -212,7 +246,6 @@ find_package(MKL)
|
|||
if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
|
||||
set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
|
||||
set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
|
||||
include_directories(${MKL_INCLUDE_DIR})
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
|
||||
|
||||
# --no-as-needed is required with gcc according to the MKL link advisor
|
||||
|
@ -247,10 +280,9 @@ option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', us
|
|||
# Switch for using system Eigen or GTSAM-bundled Eigen
|
||||
if(GTSAM_USE_SYSTEM_EIGEN)
|
||||
find_package(Eigen3 REQUIRED)
|
||||
include_directories(AFTER "${EIGEN3_INCLUDE_DIR}")
|
||||
|
||||
# Use generic Eigen include paths e.g. <Eigen/Core>
|
||||
set(GTSAM_EIGEN_INCLUDE_PREFIX "${EIGEN3_INCLUDE_DIR}")
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
|
||||
|
||||
# check if MKL is also enabled - can have one or the other, but not both!
|
||||
# Note: Eigen >= v3.2.5 includes our patches
|
||||
|
@ -264,28 +296,29 @@ if(GTSAM_USE_SYSTEM_EIGEN)
|
|||
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
|
||||
endif()
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
|
||||
else()
|
||||
# Use bundled Eigen include path.
|
||||
# Clear any variables set by FindEigen3
|
||||
if(EIGEN3_INCLUDE_DIR)
|
||||
set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
|
||||
endif()
|
||||
# Add the bundled version of eigen to the include path so that it can still be included
|
||||
# with #include <Eigen/Core>
|
||||
include_directories(BEFORE "gtsam/3rdparty/Eigen/")
|
||||
|
||||
# set full path to be used by external projects
|
||||
# this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
|
||||
set(GTSAM_EIGEN_INCLUDE_PREFIX "${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/Eigen/")
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
|
||||
endif()
|
||||
|
||||
if (MSVC)
|
||||
if (BUILD_SHARED_LIBS)
|
||||
# mute eigen static assert to avoid errors in shared lib
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS DEIGEN_NO_STATIC_ASSERT)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC DEIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS "/wd4244") # Disable loss of precision which is thrown all over our Eigen
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
|
@ -326,52 +359,29 @@ elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
|
|||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
|
||||
endif()
|
||||
|
||||
# Include boost - use 'BEFORE' so that a specific boost specified to CMake
|
||||
# takes precedence over a system-installed one.
|
||||
include_directories(BEFORE SYSTEM ${Boost_INCLUDE_DIR})
|
||||
|
||||
if(GTSAM_SUPPORT_NESTED_DISSECTION)
|
||||
set(METIS_INCLUDE_DIRECTORIES
|
||||
gtsam/3rdparty/metis/include
|
||||
gtsam/3rdparty/metis/libmetis
|
||||
gtsam/3rdparty/metis/GKlib)
|
||||
else()
|
||||
set(METIS_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
|
||||
# Add includes for source directories 'BEFORE' boost and any system include
|
||||
# paths so that the compiler uses GTSAM headers in our source directory instead
|
||||
# of any previously installed GTSAM headers.
|
||||
include_directories(BEFORE
|
||||
gtsam/3rdparty/SuiteSparse_config
|
||||
gtsam/3rdparty/CCOLAMD/Include
|
||||
${METIS_INCLUDE_DIRECTORIES}
|
||||
${PROJECT_SOURCE_DIR}
|
||||
${PROJECT_BINARY_DIR} # So we can include generated config header files
|
||||
CppUnitLite)
|
||||
|
||||
if(MSVC)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS /bigobj) # Allow large object files for template-based code
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
|
||||
endif()
|
||||
|
||||
# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS -Wno-unused-local-typedefs)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# As of XCode 7, clang also complains about this
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS -Wno-unused-local-typedefs)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS GTSAM_EXTRA_CONSISTENCY_CHECKS)
|
||||
# This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
|
@ -574,6 +584,7 @@ endif()
|
|||
|
||||
message(STATUS "Cython toolbox flags ")
|
||||
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
|
||||
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
|
||||
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
||||
message(STATUS "===============================================================")
|
||||
|
||||
|
@ -582,10 +593,10 @@ if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
|
|||
message(WARNING "TBB was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
|
||||
endif()
|
||||
if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
|
||||
message(WARNING "MKL was not found - this is ok, but note that MKL yields better performance. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning.")
|
||||
message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
|
||||
endif()
|
||||
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
|
||||
message(WARNING "Your compiler does not support OpenMP - this is ok, but performance may be improved with OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning.")
|
||||
message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
|
||||
endif()
|
||||
if(GTSAM_BUILD_PYTHON AND GTSAM_PYTHON_WARNINGS)
|
||||
message(WARNING "${GTSAM_PYTHON_WARNINGS}")
|
||||
|

@@ -6,6 +6,7 @@ file(GLOB cppunitlite_src "*.cpp")
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h

gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure

@@ -97,12 +97,24 @@ Note that in the Lie group case, the usual valid expressions for Retract and Loc

For Lie groups, the `exponential map` above is the most obvious mapping: it
associates straight lines in the tangent space with geodesics on the manifold
(and its inverse, the log map). However, there are two cases in which we deviate from this:
(and its inverse, the log map). However, there are several cases in which we deviate from this:

However, the exponential map is unnecessarily expensive for use in optimization. Hence, in GTSAM there is the option to provide a cheaper chart by means of the `ChartAtOrigin` struct in a class. This is done for *SE(2)*, *SO(3)* and *SE(3)* (see `Pose2`, `Rot3`, `Pose3`).

Most Lie groups we care about are *Matrix groups*, continuous sub-groups of *GL(n)*, the group of *n x n* invertible matrices. In this case, a lot of the derivative calculations needed can be standardized, and this is done by the `LieGroup` superclass. You only need to provide an `AdjointMap` method.

A CRTP helper class `LieGroup` is available that can take a class and create some of the Lie group methods automatically. The class needs:

* operator* : implements group operator
* inverse: implements group inverse
* AdjointMap: maps tangent vectors according to group element
* Expmap/Logmap: exponential map and its inverse
* ChartAtOrigin: struct where you define Retract/Local at origin

To use, simply derive, but also say `using LieGroup<Class,N>::inverse` so you get an inverse with a derivative.

Finally, to create the traits automatically you can use `internal::LieGroupTraits<Class>`.
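
To make the Expmap/Retract distinction above concrete, here is a minimal sketch using the Cython/Python wrapper described later in this document (the `Pose2.Expmap` and `retract` names are assumed from that wrapper, so treat this as illustrative rather than canonical):

```python
import numpy as np
import gtsam

# A tangent vector xi = (v_x, v_y, omega) at the identity of SE(2)
xi = np.array([0.3, 0.0, 0.2])

# Exponential map: maps the straight line through xi onto a geodesic of SE(2)
g = gtsam.Pose2.Expmap(xi)

# Retract at the identity: may use the cheaper ChartAtOrigin instead of the full Expmap
h = gtsam.Pose2().retract(xi)

print(g.x(), g.y(), g.theta())
print(h.x(), h.y(), h.theta())
```
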
Vector Space
------------

INSTALL

@@ -1,146 +0,0 @@
|||
Quickstart
|
||||
|
||||
In the root library folder execute:
|
||||
|
||||
$] mkdir build
|
||||
$] cd build
|
||||
$] cmake ..
|
||||
$] make check (optional, runs unit tests)
|
||||
$] make install
|
||||
|
||||
Important Installation Notes
|
||||
----------------------------
|
||||
|
||||
1)
|
||||
GTSAM requires the following libraries to be installed on your system:
|
||||
- BOOST version 1.43 or greater (install through Linux repositories or MacPorts)
|
||||
- Cmake version 2.6 or higher
|
||||
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
|
||||
|
||||
Optional dependent libraries:
|
||||
- If TBB is installed and detectable by CMake GTSAM will use it automatically.
|
||||
Ensure that CMake prints "Use Intel TBB : Yes". To disable the use of TBB,
|
||||
disable the CMake flag GTSAM_WITH_TBB (enabled by default). On Ubuntu, TBB
|
||||
may be installed from the Ubuntu repositories, and for other platforms it
|
||||
may be downloaded from https://www.threadingbuildingblocks.org/
|
||||
|
||||
Tested compilers:
|
||||
|
||||
- GCC 4.2-4.7
|
||||
- OSX Clang 2.9-5.0
|
||||
- OSX GCC 4.2
|
||||
- MSVC 2010, 2012
|
||||
|
||||
Tested systems:
|
||||
|
||||
- Ubuntu 11.04 - 13.10
|
||||
- MacOS 10.6 - 10.9
|
||||
- Windows 7, 8, 8.1
|
||||
|
||||
Known issues:
|
||||
|
||||
- MSVC 2013 is not yet supported because it cannot build the serialization module
|
||||
of Boost 1.55 (or earlier).
|
||||
|
||||
2)
|
||||
GTSAM makes extensive use of debug assertions, and we highly recommend you work
|
||||
in Debug mode while developing (enabled by default). Likewise, it is imperative
|
||||
that you switch to release mode when running finished code and for timing. GTSAM
|
||||
will run up to 10x faster in Release mode! See the end of this document for
|
||||
additional debugging tips.
|
||||
|
||||
3)
|
||||
GTSAM has Doxygen documentation. To generate, run 'make doc' from your
|
||||
build directory.
|
||||
|
||||
4)
|
||||
The instructions below install the library to the default system install path and
|
||||
build all components. From a terminal, starting in the root library folder,
|
||||
execute commands as follows for an out-of-source build:
|
||||
|
||||
$] mkdir build
|
||||
$] cd build
|
||||
$] cmake ..
|
||||
$] make check (optional, runs unit tests)
|
||||
$] make install
|
||||
|
||||
This will build the library and unit tests, run all of the unit tests,
|
||||
and then install the library itself.
|
||||
|
||||
- CMake Configuration Options and Details
|
||||
|
||||
GTSAM has a number of options that can be configured, which is best done with
|
||||
one of the following:
|
||||
|
||||
ccmake the curses GUI for cmake
|
||||
cmake-gui a real GUI for cmake
|
||||
|
||||
Important Options:
|
||||
|
||||
CMAKE_BUILD_TYPE: We support several build configurations for GTSAM (case insensitive)
|
||||
Debug (default) All error checking options on, no optimization. Use for development.
|
||||
Release Optimizations turned on, no debug symbols.
|
||||
Timing Adds ENABLE_TIMING flag to provide statistics on operation
|
||||
Profiling Standard configuration for use during profiling
|
||||
RelWithDebInfo Same as Release, but with the -g flag for debug symbols
|
||||
|
||||
CMAKE_INSTALL_PREFIX: The install folder. The default is typically /usr/local/
|
||||
To configure to install to your home directory, you could execute:
|
||||
$] cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME ..
|
||||
|
||||
GTSAM_TOOLBOX_INSTALL_PATH: The Matlab toolbox will be installed in a subdirectory
|
||||
of this folder, called 'gtsam'.
|
||||
$] cmake -DGTSAM_TOOLBOX_INSTALL_PATH:PATH=$HOME/toolbox ..
|
||||
|
||||
GTSAM_BUILD_CONVENIENCE_LIBRARIES: This is a build option to allow for tests in
|
||||
subfolders to be linked against convenience libraries rather than the full libgtsam.
|
||||
Set with the command line as follows:
|
||||
$] cmake -DGTSAM_BUILD_CONVENIENCE_LIBRARIES:OPTION=ON ..
|
||||
ON (Default) This builds convenience libraries and links tests against them. This
|
||||
option is suggested for gtsam developers, as it is possible to build
|
||||
and run tests without first building the rest of the library, and
|
||||
speeds up compilation for a single test. The downside of this option
|
||||
is that it will build the entire library again to build the full
|
||||
libgtsam library, so build/install will be slower.
|
||||
OFF This will build all of libgtsam before any of the tests, and then
|
||||
link all of the tests at once. This option is best for users of GTSAM,
|
||||
as it avoids rebuilding the entirety of gtsam an extra time.
|
||||
|
||||
GTSAM_BUILD_UNSTABLE: Enable build and install for libgtsam_unstable library.
|
||||
Set with the command line as follows:
|
||||
$] cmake -DGTSAM_BUILD_UNSTABLE:OPTION=ON ..
|
||||
ON When enabled, libgtsam_unstable will be built and installed with the
|
||||
same options as libgtsam. In addition, if tests are enabled, the
|
||||
unit tests will be built as well. The Matlab toolbox will also
|
||||
be generated if the matlab toolbox is enabled, installing into a
|
||||
folder called "gtsam_unstable".
|
||||
OFF (Default) If disabled, no gtsam_unstable code will be included in build or install.
|
||||
|
||||
Check
|
||||
|
||||
"make check" will build and run all of the tests. Note that the tests will only be
|
||||
built when using the "check" targets, to prevent "make install" from building the tests
|
||||
unnecessarily. You can also run "make timing" to build all of the timing scripts.
|
||||
To run check on a particular module only, run "make check.[subfolder]", so to run
|
||||
just the geometry tests, run "make check.geometry". Individual tests can be run by
|
||||
appending ".run" to the name of the test, for example, to run testMatrix, run
|
||||
"make testMatrix.run".
|
||||
|
||||
MEX_COMMAND: Path to the mex compiler. Defaults to assume the path is included in your
|
||||
shell's PATH environment variable. mex is installed with matlab at
|
||||
$MATLABROOT/bin/mex
|
||||
|
||||
$MATLABROOT can be found by executing the command 'matlabroot' in MATLAB
|
||||
|
||||
Debugging tips:
|
||||
|
||||
Another useful debugging symbol is _GLIBCXX_DEBUG, which enables debug checks
|
||||
and safe containers in the standard C++ library and makes problems much easier
|
||||
to find.
|
||||
|
||||
NOTE: The native Snow Leopard g++ compiler/library contains a bug that makes
|
||||
it impossible to use _GLIBCXX_DEBUG. MacPorts g++ compilers do work with it though.
|
||||
|
||||
NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against
|
||||
gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of
|
||||
header-only Eigen.
|
|
@@ -0,0 +1,168 @@
# Quickstart

In the root library folder execute:

```sh
$ mkdir build
$ cd build
$ cmake ..
$ make check # (optional, runs unit tests)
$ make install
```

## Important Installation Notes

1. GTSAM requires the following libraries to be installed on your system:
   - BOOST version 1.43 or greater (install through Linux repositories or MacPorts)
   - CMake version 3.0 or higher
   - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher

   Optional dependent libraries:
   - If TBB is installed and detectable by CMake GTSAM will use it automatically.
     Ensure that CMake prints "Use Intel TBB : Yes". To disable the use of TBB,
     disable the CMake flag GTSAM_WITH_TBB (enabled by default). On Ubuntu, TBB
     may be installed from the Ubuntu repositories, and for other platforms it
     may be downloaded from https://www.threadingbuildingblocks.org/
   - GTSAM may be configured to use MKL by toggling `GTSAM_WITH_EIGEN_MKL` and
     `GTSAM_WITH_EIGEN_MKL_OPENMP` to `ON`; however, best performance is usually
     achieved with MKL disabled. We therefore advise you to benchmark your problem
     before using MKL.

   Tested compilers:

   - GCC 4.2-7.3
   - OS X Clang 2.9-10.0
   - OS X GCC 4.2
   - MSVC 2010, 2012, 2017

   Tested systems:

   - Ubuntu 16.04 - 18.04
   - MacOS 10.6 - 10.14
   - Windows 7, 8, 8.1, 10

   Known issues:

   - MSVC 2013 is not yet supported because it cannot build the serialization module
     of Boost 1.55 (or earlier).

2. GTSAM makes extensive use of debug assertions, and we highly recommend you work
   in Debug mode while developing (enabled by default). Likewise, it is imperative
   that you switch to release mode when running finished code and for timing. GTSAM
   will run up to 10x faster in Release mode! See the end of this document for
   additional debugging tips.

3. GTSAM has Doxygen documentation. To generate, run 'make doc' from your
   build directory.

4. The instructions below install the library to the default system install path and
   build all components. From a terminal, starting in the root library folder,
   execute commands as follows for an out-of-source build:

   ```sh
   $ mkdir build
   $ cd build
   $ cmake ..
   $ make check # (optional, runs unit tests)
   $ make install
   ```

   This will build the library and unit tests, run all of the unit tests,
   and then install the library itself.

## CMake Configuration Options and Details

GTSAM has a number of options that can be configured, which is best done with
one of the following:

- ccmake      the curses GUI for cmake
- cmake-gui   a real GUI for cmake

### Important Options:

#### CMAKE_BUILD_TYPE

We support several build configurations for GTSAM (case insensitive):

```cmake -DCMAKE_BUILD_TYPE=[Option] ..```

- Debug (default)  All error checking options on, no optimization. Use for development.
- Release          Optimizations turned on, no debug symbols.
- Timing           Adds ENABLE_TIMING flag to provide statistics on operation
- Profiling        Standard configuration for use during profiling
- RelWithDebInfo   Same as Release, but with the -g flag for debug symbols

#### CMAKE_INSTALL_PREFIX

The install folder. The default is typically `/usr/local/`.
To configure to install to your home directory, you could execute:

```cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME ..```

#### GTSAM_TOOLBOX_INSTALL_PATH

The Matlab toolbox will be installed in a subdirectory of this folder, called 'gtsam'.

```cmake -DGTSAM_TOOLBOX_INSTALL_PATH:PATH=$HOME/toolbox ..```

#### GTSAM_BUILD_CONVENIENCE_LIBRARIES

This is a build option to allow for tests in subfolders to be linked against convenience libraries rather than the full libgtsam.
Set with the command line as follows:

```cmake -DGTSAM_BUILD_CONVENIENCE_LIBRARIES:OPTION=ON ..```

- ON (Default): This builds convenience libraries and links tests against them. This option is suggested for gtsam developers, as it is possible to build and run tests without first building the rest of the library, and speeds up compilation for a single test. The downside of this option is that it will build the entire library again to build the full libgtsam library, so build/install will be slower.
- OFF: This will build all of libgtsam before any of the tests, and then link all of the tests at once. This option is best for users of GTSAM, as it avoids rebuilding the entirety of gtsam an extra time.

#### GTSAM_BUILD_UNSTABLE

Enable build and install for the libgtsam_unstable library.
Set with the command line as follows:

```cmake -DGTSAM_BUILD_UNSTABLE:OPTION=ON ..```

- ON: When enabled, libgtsam_unstable will be built and installed with the same options as libgtsam. In addition, if tests are enabled, the unit tests will be built as well. The Matlab toolbox will also be generated if the matlab toolbox is enabled, installing into a folder called `gtsam_unstable`.
- OFF (Default): If disabled, no `gtsam_unstable` code will be included in build or install.

## Check

`make check` will build and run all of the tests. Note that the tests will only be
built when using the "check" targets, to prevent `make install` from building the tests
unnecessarily. You can also run `make timing` to build all of the timing scripts.
To run check on a particular module only, run `make check.[subfolder]`, so to run
just the geometry tests, run `make check.geometry`. Individual tests can be run by
appending `.run` to the name of the test, for example, to run testMatrix, run
`make testMatrix.run`.

#### MEX_COMMAND

Path to the mex compiler. Defaults to assume the path is included in your shell's PATH environment variable. mex is installed with matlab at `$MATLABROOT/bin/mex`.

$MATLABROOT can be found by executing the command `matlabroot` in MATLAB.

## Performance

Here are some tips to get the best possible performance out of GTSAM.

1. Build in `Release` mode. GTSAM will run up to 10x faster compared to `Debug` mode.
2. Enable TBB. On modern processors with multiple cores, this can easily speed up
   optimization by 30-50%. Please note that this may not be true for very small
   problems where the overhead of dispatching work to multiple threads outweighs
   the benefit. We recommend that you benchmark your problem with/without TBB.
   A small timing sketch is given after this list.
3. Add `-march=native` to `GTSAM_CMAKE_CXX_FLAGS`. A performance gain of
   25-30% can be expected on modern processors. Note that this affects the portability
   of your executable. It may not run when copied to another system with an older/different
   processor architecture.
   Also note that all dependent projects *must* be compiled with the same flag, or
   seg-faults and other undefined behavior may result.
4. Possibly enable MKL. Please note that our benchmarks have shown that this helps only
   in very limited cases, and actually hurts performance in the usual case. We therefore
   recommend that you do *not* enable MKL, unless you have benchmarked it on
   your problem and have verified that it improves performance.
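
To compare configurations (for example, TBB on vs. off, or Release vs. Debug), one option is a small timing harness through the Python wrapper; this sketch reuses the factor and noise-model names from the Python examples later in this document, and the absolute numbers only matter relative to each other on the same machine:

```python
import time

import numpy as np
import gtsam

# A tiny pose-graph problem, built with the same API as the Python examples in this repo
graph = gtsam.NonlinearFactorGraph()
prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
odometry_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), prior_noise))
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), odometry_noise))
graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, 0), odometry_noise))

initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))

# Time repeated optimizations; rerun against builds with different CMake settings to compare
params = gtsam.LevenbergMarquardtParams()
start = time.time()
for _ in range(100):
    gtsam.LevenbergMarquardtOptimizer(graph, initial, params).optimize()
print("100 optimizations took {:.3f} s".format(time.time() - start))
```
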

## Debugging tips

Another useful debugging symbol is _GLIBCXX_DEBUG, which enables debug checks and safe containers in the standard C++ library and makes problems much easier to find.

NOTE: The native Snow Leopard g++ compiler/library contains a bug that makes it impossible to use _GLIBCXX_DEBUG. MacPorts g++ compilers do work with it though.

NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of header-only Eigen.

README.md

@@ -30,7 +30,7 @@ $ make install
Prerequisites:

- [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`)
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 2.6 (Ubuntu: `sudo apt-get install cmake`)
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`)
- A modern compiler, i.e., at least gcc 4.7.3 on Linux.

Optional prerequisites - used automatically if findable by CMake:

@@ -54,7 +54,7 @@ GTSAM includes a state of the art IMU handling scheme based on

Our implementation improves on this using integration on the manifold, as detailed in

- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, "Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors", Int. Conf. on Robotics and Automation (ICRA), 2014.
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015.

If you are using the factor in academic work, please cite the publications above.

@@ -67,14 +67,14 @@ Additional Information

There is a [`GTSAM users Google group`](https://groups.google.com/forum/#!forum/gtsam-users) for general discussion.

Read about important [`GTSAM-Concepts`](GTSAM-Concepts.md) here. A primer on GTSAM Expressions,
which support (superfast) automatic differentiation,
can be found on the [GTSAM wiki on BitBucket](https://bitbucket.org/gtborg/gtsam/wiki/Home).

See the [`INSTALL`](INSTALL) file for more detailed installation instructions.
See the [`INSTALL`](INSTALL.md) file for more detailed installation instructions.

GTSAM is open source under the BSD license, see the [`LICENSE`](LICENSE) and [`LICENSE.BSD`](LICENSE.BSD) files.

Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM.

GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS).

THANKS

@@ -1,5 +1,6 @@
GTSAM was made possible by the efforts of many collaborators at Georgia Tech, listed below with their current affiliation, if they left Tech:

* Jeremy Aguilon, Facebook
* Sungtae An
* Doru Balcan, Bank of America
* Chris Beall

@@ -26,6 +27,7 @@ GTSAM was made possible by the efforts of many collaborators at Georgia Tech, li
* Natesh Srinivasan
* Alex Trevor
* Stephen Williams, BossaNova
* Matthew Broadway

at ETH, Zurich

@@ -1,7 +1,7 @@
# - Config file for @CMAKE_PROJECT_NAME@
# It defines the following variables
# @PACKAGE_NAME@_INCLUDE_DIR - include directories for @CMAKE_PROJECT_NAME@

# Compute paths
get_filename_component(OUR_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
if(EXISTS "${OUR_CMAKE_DIR}/CMakeCache.txt")

@@ -11,7 +11,11 @@ else()
# Find installed library
set(@PACKAGE_NAME@_INCLUDE_DIR "${OUR_CMAKE_DIR}/@CONF_REL_INCLUDE_DIR@" CACHE PATH "@PACKAGE_NAME@ include directory")
endif()

# Find dependencies, required by cmake exported targets:
include(CMakeFindDependencyMacro)
find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@)

# Load exports
include(${OUR_CMAKE_DIR}/@PACKAGE_NAME@-exports.cmake)
|
@ -29,10 +29,15 @@
|
|||
|
||||
# Use the Cython executable that lives next to the Python executable
|
||||
# if it is a local installation.
|
||||
find_package( PythonInterp )
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
|
||||
endif()
|
||||
|
||||
if ( PYTHONINTERP_FOUND )
|
||||
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"import Cython; print Cython.__path__"
|
||||
"import Cython; print(Cython.__path__[0])"
|
||||
RESULT_VARIABLE RESULT
|
||||
OUTPUT_VARIABLE CYTHON_PATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
|
@ -51,7 +56,7 @@ endif ()
|
|||
# RESULT=0 means ok
|
||||
if ( NOT RESULT )
|
||||
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"import Cython; print Cython.__version__"
|
||||
"import Cython; print(Cython.__version__)"
|
||||
RESULT_VARIABLE RESULT
|
||||
OUTPUT_VARIABLE CYTHON_VAR_OUTPUT
|
||||
ERROR_VARIABLE CYTHON_VAR_OUTPUT
|
||||
|
|
|
@ -206,6 +206,15 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS
|
|||
)
|
||||
ENDIF()
|
||||
|
||||
IF(NOT MKL_LAPACK_LIBRARY)
|
||||
FIND_LIBRARY(MKL_LAPACK_LIBRARY
|
||||
mkl_intel_lp64
|
||||
PATHS
|
||||
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
|
||||
${MKL_ROOT_DIR}/lib/
|
||||
)
|
||||
ENDIF()
|
||||
|
||||
# iomp5
|
||||
IF("${MKL_ARCH_DIR}" STREQUAL "32")
|
||||
IF(UNIX AND NOT APPLE)
|
||||
|
|
|
@ -40,9 +40,17 @@
|
|||
|
||||
# Finding NumPy involves calling the Python interpreter
|
||||
if(NumPy_FIND_REQUIRED)
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
endif()
|
||||
else()
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT PYTHONINTERP_FOUND)
|
||||
|
|
|
@ -3,8 +3,23 @@
|
|||
# in the current environment are different from the cached!
|
||||
unset(PYTHON_EXECUTABLE CACHE)
|
||||
unset(CYTHON_EXECUTABLE CACHE)
|
||||
unset(PYTHON_INCLUDE_DIR CACHE)
|
||||
unset(PYTHON_MAJOR_VERSION CACHE)
|
||||
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp REQUIRED)
|
||||
find_package(PythonLibs REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
endif()
|
||||
find_package(Cython 0.25.2 REQUIRED)
|
||||
|
||||
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"from __future__ import print_function;import sys;print(sys.version[0], end='')"
|
||||
OUTPUT_VARIABLE PYTHON_MAJOR_VERSION
|
||||
)
|
||||
|
||||
# User-friendly Cython wrapping and installing function.
|
||||
# Builds a Cython module from the provided interface_header.
|
||||
# For example, for the interface header gtsam.h,
|
||||
|
@ -29,12 +44,12 @@ endfunction()
|
|||
|
||||
function(set_up_required_cython_packages)
|
||||
# Set up building of cython module
|
||||
find_package(PythonLibs 2.7 REQUIRED)
|
||||
include_directories(${PYTHON_INCLUDE_DIRS})
|
||||
find_package(NumPy REQUIRED)
|
||||
include_directories(${NUMPY_INCLUDE_DIRS})
|
||||
endfunction()
|
||||
|
||||
|
||||
# Convert pyx to cpp by executing cython
|
||||
# This is the first step to compile cython from the command line
|
||||
# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
|
||||
|
@ -52,7 +67,7 @@ function(pyx_to_cpp target pyx_file generated_cpp include_dirs)
|
|||
add_custom_command(
|
||||
OUTPUT ${generated_cpp}
|
||||
COMMAND
|
||||
${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus ${includes_for_cython} ${pyx_file} -o ${generated_cpp}
|
||||
${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp}
|
||||
VERBATIM)
|
||||
add_custom_target(${target} ALL DEPENDS ${generated_cpp})
|
||||
endfunction()
|
||||
|
|
|
@ -22,7 +22,10 @@ include_directories(BEFORE "${PROJECT_SOURCE_DIR}")
|
|||
###################################################################################
|
||||
# Find GTSAM components
|
||||
find_package(GTSAM REQUIRED) # Uses installed package
|
||||
include_directories(${GTSAM_INCLUDE_DIR})
|
||||
# Note: Since Jan-2019, GTSAMConfig.cmake defines exported CMake targets
|
||||
# that automatically do include the include_directories() without the need
|
||||
# to call include_directories(), just target_link_libraries(NAME gtsam)
|
||||
#include_directories(${GTSAM_INCLUDE_DIR})
|
||||
|
||||
###################################################################################
|
||||
# Build static library from common sources
|
||||
|
|
|
@ -19,22 +19,25 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
|||
# wrap gtsam_unstable
|
||||
if(GTSAM_BUILD_UNSTABLE)
|
||||
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
|
||||
set(GTSAM_UNSTABLE_IMPORT "from gtsam_unstable import *")
|
||||
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
|
||||
"from gtsam.gtsam cimport *" # extra imports
|
||||
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path
|
||||
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path
|
||||
gtsam_unstable # library to link with
|
||||
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
|
||||
)
|
||||
# for some reasons cython gtsam_unstable can't find gtsam/gtsam.pxd without doing this
|
||||
file(WRITE ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py "")
|
||||
endif()
|
||||
|
||||
file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
|
||||
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
|
||||
|
||||
# Install the custom-generated __init__.py
|
||||
# This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py)
|
||||
install_cython_files("${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py" "${GTSAM_CYTHON_INSTALL_PATH}/gtsam")
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
|
||||
install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
|
||||
# install scripts and tests
|
||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||
|
||||
endif ()
|
||||
|
|

@@ -2,23 +2,30 @@ This is the Cython/Python wrapper around the GTSAM C++ library.

INSTALL
=======
- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake`, otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
- This wrapper needs Cython (>=0.25.2), backports_abc (>=0.5), and numpy. These can be installed as follows:

  ```bash
  pip install -r <gtsam_folder>/cython/requirements.txt
  ```

- For compatiblity with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
  named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.

- Build and install gtsam using cmake with GTSAM_INSTALL_CYTHON_TOOLBOX enabled.
  The wrapped module will be installed to GTSAM_CYTHON_INSTALL_PATH, which is
  by default: <your CMAKE_INSTALL_PREFIX>/cython
- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
  The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
  by default: `<your CMAKE_INSTALL_PREFIX>/cython`

- Modify your PYTHONPATH to include the GTSAM_CYTHON_INSTALL_PATH:
- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:

  ```bash
  export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
  ```

- To install system-wide: run `make install`, then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
- (the same command can be used to install into a virtual environment if it is active)
- note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
- if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
  Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.

A minimal sketch to check that the wrapper can be imported is given after this list.
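
Once the path is set up (or the package is installed), a minimal import check looks like this; `Pose2` and `noiseModel_Diagonal` are the names used by the Python examples elsewhere in this repository:

```python
# Minimal sanity check that the wrapped module is importable
# (assumes PYTHONPATH or a system-wide install as described above).
import numpy as np
import gtsam

pose = gtsam.Pose2(1.0, 2.0, np.pi / 2)
noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
print(pose.x(), pose.y(), pose.theta())
```
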
UNIT TESTS
==========

@@ -0,0 +1,26 @@
from .gtsam import *

try:
    import gtsam_unstable


    def _deprecated_wrapper(item, name):
        def wrapper(*args, **kwargs):
            from warnings import warn
            message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) +
                       'Please import it from gtsam_unstable.')
            warn(message)
            return item(*args, **kwargs)
        return wrapper


    for name in dir(gtsam_unstable):
        if not name.startswith('__'):
            item = getattr(gtsam_unstable, name)
            if callable(item):
                item = _deprecated_wrapper(item, name)
            globals()[name] = item

except ImportError:
    pass
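
The re-export above relies on a small wrapper pattern; the following standalone sketch (hypothetical names, not part of GTSAM) shows the same pattern in isolation:

```python
from warnings import warn


def _deprecated_wrapper(item, name):
    """Wrap a callable so that calling it emits a deprecation-style warning first."""
    def wrapper(*args, **kwargs):
        warn('importing "{}" from here is deprecated; use the new location instead.'.format(name))
        return item(*args, **kwargs)
    return wrapper


def legacy_add(a, b):  # hypothetical function standing in for a wrapped gtsam_unstable class
    return a + b


legacy_add = _deprecated_wrapper(legacy_add, 'legacy_add')
print(legacy_add(1, 2))  # prints 3, after emitting the warning
```
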

@@ -1,2 +0,0 @@
from gtsam import *
${GTSAM_UNSTABLE_IMPORT}
@ -17,6 +17,9 @@ import numpy as np
|
|||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
# Create noise models
|
||||
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
|
@ -50,3 +53,17 @@ params = gtsam.LevenbergMarquardtParams()
|
|||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 4):
|
||||
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
|
||||
|
||||
fig = plt.figure(0)
|
||||
for i in range(1, 4):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
|
|||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
from gtsam import Pose2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
|
@ -114,7 +115,7 @@ class ThreeLinkArm(object):
|
|||
|
||||
l1Zl1 = expmap(0.0, 0.0, q[0])
|
||||
l2Zl2 = expmap(0.0, self.L1, q[1])
|
||||
l3Zl3 = expmap(0.0, 7.0, q[2])
|
||||
l3Zl3 = expmap(0.0, self.L1+self.L2, q[2])
|
||||
return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
|
||||
|
||||
def ik(self, sTt_desired, e=1e-9):
|
||||
|
@ -202,7 +203,7 @@ Q1 = np.radians(vector3(-30, -45, -90))
|
|||
Q2 = np.radians(vector3(-90, 90, 0))
|
||||
|
||||
|
||||
class TestPose2SLAMExample(unittest.TestCase):
|
||||
class TestPose2SLAMExample(GtsamTestCase):
|
||||
"""Unit tests for functions used below."""
|
||||
|
||||
def setUp(self):
|
||||
|
@ -296,12 +297,18 @@ def run_example():
|
|||
""" Use trajectory interpolation and then trajectory tracking a la Murray
|
||||
to move a 3-link arm on a straight line.
|
||||
"""
|
||||
# Create arm
|
||||
arm = ThreeLinkArm()
|
||||
|
||||
# Get initial pose using forward kinematics
|
||||
q = np.radians(vector3(30, -30, 45))
|
||||
sTt_initial = arm.fk(q)
|
||||
|
||||
# Create interpolated trajectory in task space to desired goal pose
|
||||
sTt_goal = Pose2(2.4, 4.3, math.radians(0))
|
||||
poses = trajectory(sTt_initial, sTt_goal, 50)
|
||||
|
||||
# Setup figure and plot initial pose
|
||||
fignum = 0
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca()
|
||||
|
@ -309,6 +316,8 @@ def run_example():
|
|||
axes.set_ylim(0, 10)
|
||||
gtsam_plot.plot_pose2(fignum, arm.fk(q))
|
||||
|
||||
# For all poses in interpolated trajectory, calculate dq to move to next pose.
|
||||
# We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose).
|
||||
for pose in poses:
|
||||
sTt = arm.fk(q)
|
||||
error = delta(sTt, pose)
|
||||
|
|
|
@ -19,6 +19,9 @@ import numpy as np
|
|||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
"""Create 3d double numpy array."""
|
||||
|
@ -85,3 +88,10 @@ print("Final Result:\n{}".format(result))
|
|||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 6):
|
||||
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
|
||||
|
||||
fig = plt.figure(0)
|
||||
for i in range(1, 6):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Initialize PoseSLAM with Chordal init
|
||||
Author: Luca Carlone, Frank Dellaert (python port)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
# Read graph from file
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example.txt")
|
||||
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(g2oFile, is3D)
|
||||
|
||||
# Add prior on the first key. TODO: assumes first key is 0
|
||||
priorModel = gtsam.noiseModel_Diagonal.Variances(
|
||||
np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
|
||||
firstKey = initial.keys().at(0)
|
||||
graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel))
|
||||
|
||||
# Initializing Pose3 - chordal relaxation
|
||||
initialization = gtsam.InitializePose3.initialize(graph)
|
||||
|
||||
print(initialization)
|
|
@@ -1,4 +1,57 @@
These examples are almost identical to the old handwritten python wrapper
examples. However, there are just some slight name changes, for example
noiseModel.Diagonal becomes noiseModel_Diagonal etc...
Also, annoyingly, instead of gtsam.Symbol('b',0) we now need to say gtsam.symbol(ord('b'), 0))
`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc...
Also, annoyingly, instead of `gtsam.Symbol('b', 0)` we now need to say `gtsam.symbol(ord('b'), 0)`
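
For instance, a short sketch of the renamed API (the calls are exactly the ones named above; `ord` is needed because the wrapped `symbol` takes an integer character code):

```python
import numpy as np
import gtsam

# Old handwritten wrapper:        gtsam.noiseModel.Diagonal / gtsam.Symbol('b', 0)
# New Cython wrapper (this repo): gtsam.noiseModel_Diagonal / gtsam.symbol(ord('b'), 0)
prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
key = gtsam.symbol(ord('b'), 0)
print(key)
```
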
# Porting Progress

| C++ Example Name                                        | Ported |
|---------------------------------------------------------|--------|
| CameraResectioning                                      |        |
| CreateSFMExampleData                                    |        |
| DiscreteBayesNet_FG                                     | none of the required discrete functionality is exposed through cython |
| easyPoint2KalmanFilter                                  | ExtendedKalmanFilter not exposed through cython |
| elaboratePoint2KalmanFilter                             | GaussianSequentialSolver not exposed through cython |
| ImuFactorExample2                                       | X      |
| ImuFactorsExample                                       |        |
| ISAM2Example_SmartFactor                                |        |
| ISAM2_SmartFactorStereo_IMU                             |        |
| LocalizationExample                                     | impossible? |
| METISOrderingExample                                    |        |
| OdometryExample                                         | X      |
| PlanarSLAMExample                                       | X      |
| Pose2SLAMExample                                        | X      |
| Pose2SLAMExampleExpressions                             |        |
| Pose2SLAMExample_g2o                                    | X      |
| Pose2SLAMExample_graph                                  |        |
| Pose2SLAMExample_graphviz                               |        |
| Pose2SLAMExample_lago                                   | lago not exposed through cython |
| Pose2SLAMStressTest                                     |        |
| Pose2SLAMwSPCG                                          |        |
| Pose3SLAMExample_changeKeys                             |        |
| Pose3SLAMExampleExpressions_BearingRangeWithTransform   |        |
| Pose3SLAMExample_g2o                                    | X      |
| Pose3SLAMExample_initializePose3Chordal                 |        |
| Pose3SLAMExample_initializePose3Gradient                |        |
| RangeISAMExample_plaza2                                 |        |
| SelfCalibrationExample                                  |        |
| SFMExample_bal_COLAMD_METIS                             |        |
| SFMExample_bal                                          |        |
| SFMExample                                              | X      |
| SFMExampleExpressions_bal                               |        |
| SFMExampleExpressions                                   |        |
| SFMExample_SmartFactor                                  |        |
| SFMExample_SmartFactorPCG                               |        |
| SimpleRotation                                          | X      |
| SolverComparer                                          |        |
| StereoVOExample                                         |        |
| StereoVOExample_large                                   |        |
| TimeTBB                                                 |        |
| UGM_chain                                               | discrete functionality not exposed |
| UGM_small                                               | discrete functionality not exposed |
| VisualISAM2Example                                      | X      |
| VisualISAMExample                                       | X      |

Extra Examples (with no C++ equivalent)
- PlanarManipulatorExample
- SFMData
|
@ -0,0 +1,118 @@
"""
GTSAM Copyright 2010, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)

See LICENSE for the license information

A structure-from-motion problem on a simulated dataset
"""
from __future__ import print_function

import gtsam
import numpy as np
from gtsam.examples import SFMdata
from gtsam.gtsam import (Cal3_S2, DoglegOptimizer,
                         GenericProjectionFactorCal3_S2, NonlinearFactorGraph,
                         Point3, Pose3, PriorFactorPoint3, PriorFactorPose3,
                         Rot3, SimpleCamera, Values)


def symbol(name: str, index: int) -> int:
    """ helper for creating a symbol without explicitly casting 'name' from str to int """
    return gtsam.symbol(ord(name), index)


def main():
    """
    Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).

    Each variable in the system (poses and landmarks) must be identified with a unique key.
    We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
    Here we will use Symbols.

    In GTSAM, measurement functions are represented as 'factors'. Several common factors
    have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
    Here we will use Projection factors to model the camera's landmark observations.
    Also, we will initialize the robot at some location using a Prior factor.

    When the factors are created, we will add them to a Factor Graph. As the factors we are using
    are nonlinear factors, we will need a Nonlinear Factor Graph.

    Finally, once all of the factors have been added to our factor graph, we will want to
    solve/optimize the graph to find the best (Maximum A Posteriori) set of variable values.
    GTSAM includes several nonlinear optimizers to perform this step. Here we will use a
    trust-region method known as Powell's Dogleg.

    The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the
    nonlinear functions around an initial linearization point, then solve the linear system
    to update the linearization point. This happens repeatedly until the solver converges
    to a consistent set of variable values. This requires us to specify an initial guess
    for each variable, held in a Values container.
    """

    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()

    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a factor graph
    graph = NonlinearFactorGraph()

    # Add a prior on pose x0. This indirectly specifies where the origin is.
    # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
    pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
    factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise)
    graph.push_back(factor)

    # Simulated measurements from each camera pose, adding them to the factor graph
    for i, pose in enumerate(poses):
        camera = SimpleCamera(pose, K)
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(
                measurement, measurement_noise, symbol('x', i), symbol('l', j), K)
            graph.push_back(factor)

    # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained.
    # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance
    # between the first camera and the first landmark. All other landmark positions are interpreted using this scale.
    point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
    factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise)
    graph.push_back(factor)
    graph.print_('Factor Graph:\n')

    # Create the data structure to hold the initial estimate to the solution
    # Intentionally initialize the variables off from the ground truth
    initial_estimate = Values()
    for i, pose in enumerate(poses):
        r = Rot3.Rodrigues(-0.1, 0.2, 0.25)
        t = Point3(0.05, -0.10, 0.20)
        transformed_pose = pose.compose(Pose3(r, t))
        initial_estimate.insert(symbol('x', i), transformed_pose)
    for j, point in enumerate(points):
        transformed_point = Point3(point.vector() + np.array([-0.25, 0.20, 0.15]))
        initial_estimate.insert(symbol('l', j), transformed_point)
    initial_estimate.print_('Initial Estimates:\n')

    # Optimize the graph and print results
    params = gtsam.DoglegParams()
    params.setVerbosity('TERMINATION')
    optimizer = DoglegOptimizer(graph, initial_estimate, params)
    print('Optimizing:')
    result = optimizer.optimize()
    result.print_('Final results:\n')
    print('initial error = {}'.format(graph.error(initial_estimate)))
    print('final error = {}'.format(graph.error(result)))


if __name__ == '__main__':
    main()
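The example above commits to Dogleg, but the wrapped nonlinear optimizers share the same constructor shape, so switching solvers is a local change. A minimal sketch, assuming LevenbergMarquardtParams wraps setVerbosity the same way DoglegParams does (the LevenbergMarquardtParams/LevenbergMarquardtOptimizer classes themselves appear in the optimizer unit test later in this changeset):

# Sketch: drop-in replacement for the Dogleg block inside main() above.
# `graph` and `initial_estimate` are the objects built earlier in main();
# CeresDefaults() mirrors the preset used in the optimizer unit test below.
params = gtsam.LevenbergMarquardtParams.CeresDefaults()
params.setVerbosity('TERMINATION')  # assumed to be wrapped as on DoglegParams
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
result = optimizer.optimize()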
@ -0,0 +1,84 @@
"""
GTSAM Copyright 2010, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)

See LICENSE for the license information

This example will perform a relatively trivial optimization on
a single variable with a single factor.
"""

import numpy as np
import gtsam


def main():
    """
    Step 1: Create a factor to express a unary constraint

    The "prior" in this case is the measurement from a sensor,
    with a model of the noise on the measurement.

    The "Key" created here is a label used to associate parts of the
    state (stored in "RotValues") with particular factors. They require
    an index to allow for lookup, and should be unique.

    In general, creating a factor requires:
    - A key or set of keys labeling the variables that are acted upon
    - A measurement value
    - A measurement model with the correct dimensionality for the factor
    """
    prior = gtsam.Rot2.fromAngle(np.deg2rad(30))
    prior.print_('goal angle')
    model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1))
    key = gtsam.symbol(ord('x'), 1)
    factor = gtsam.PriorFactorRot2(key, prior, model)

    """
    Step 2: Create a graph container and add the factor to it

    Before optimizing, all factors need to be added to a Graph container,
    which provides the necessary top-level functionality for defining a
    system of constraints.

    In this case, there is only one factor, but in a practical scenario,
    many more factors would be added.
    """
    graph = gtsam.NonlinearFactorGraph()
    graph.push_back(factor)
    graph.print_('full graph')

    """
    Step 3: Create an initial estimate

    An initial estimate of the solution for the system is necessary to
    start optimization. This system state is the "Values" instance,
    which is similar in structure to a dictionary, in that it maps
    keys (the label created in step 1) to specific values.

    The initial estimate provided to optimization will be used as
    a linearization point for optimization, so it is important that
    all of the variables in the graph have a corresponding value in
    this structure.
    """
    initial = gtsam.Values()
    initial.insert(key, gtsam.Rot2.fromAngle(np.deg2rad(20)))
    initial.print_('initial estimate')

    """
    Step 4: Optimize

    After formulating the problem with a graph of constraints
    and an initial estimate, executing optimization is as simple
    as calling a general optimization function with the graph and
    initial estimate. This will yield a new RotValues structure
    with the final state of the optimization.
    """
    result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
    result.print_('final result')


if __name__ == '__main__':
    main()
@ -0,0 +1,106 @@
"""
GTSAM Copyright 2010, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)

See LICENSE for the license information

A visualSLAM example for the structure-from-motion problem on a simulated dataset
This version uses iSAM to solve the problem incrementally
"""

from __future__ import print_function

import numpy as np
import gtsam
from gtsam.examples import SFMdata
from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2,
                         NonlinearFactorGraph, NonlinearISAM, Point3, Pose3,
                         PriorFactorPoint3, PriorFactorPose3, Rot3,
                         SimpleCamera, Values)


def symbol(name: str, index: int) -> int:
    """ helper for creating a symbol without explicitly casting 'name' from str to int """
    return gtsam.symbol(ord(name), index)


def main():
    """
    A structure-from-motion example with landmarks
    - The landmarks form a 10 meter cube
    - The robot rotates around the landmarks, always facing towards the cube
    """

    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()
    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a NonlinearISAM object which will relinearize and reorder the variables
    # every "reorderInterval" updates
    isam = NonlinearISAM(reorderInterval=3)

    # Create a Factor Graph and Values to hold the new data
    graph = NonlinearFactorGraph()
    initial_estimate = Values()

    # Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):
        camera = SimpleCamera(pose, K)
        # Add factors for each landmark observation
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, symbol('x', i), symbol('l', j), K)
            graph.push_back(factor)

        # Intentionally initialize the variables off from the ground truth
        noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20))
        initial_xi = pose.compose(noise)

        # Add an initial guess for the current pose
        initial_estimate.insert(symbol('x', i), initial_xi)

        # If this is the first iteration, add a prior on the first pose to set the coordinate frame
        # and a prior on the first landmark to set the scale.
        # Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
        # adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z
            pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
            factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise)
            graph.push_back(factor)

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
            factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise)
            graph.push_back(factor)

            # Add initial guesses to all observed landmarks
            noise = np.array([-0.25, 0.20, 0.15])
            for j, point in enumerate(points):
                # Intentionally initialize the variables off from the ground truth
                initial_lj = points[j].vector() + noise
                initial_estimate.insert(symbol('l', j), Point3(initial_lj))
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            current_estimate = isam.estimate()
            print('*' * 50)
            print('Frame {}:'.format(i))
            current_estimate.print_('Current estimate: ')

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()


if __name__ == '__main__':
    main()
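The table above also marks VisualISAM2Example as ported, and the incremental loop above maps onto ISAM2 with only small changes. A minimal sketch, assuming the ISAM2 wrapper exposes update() and calculateEstimate() as in the C++ API; this is not taken verbatim from the changeset:

# Sketch of the ISAM2 flavor of the update step above. `graph` and
# `initial_estimate` hold the current frame's new factors and guesses,
# exactly as built in the loop of main() above.
isam2 = gtsam.ISAM2()
isam2.update(graph, initial_estimate)
current_estimate = isam2.calculateEstimate()
graph.resize(0)
initial_estimate.clear()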
@ -1,112 +0,0 @@
|
|||
"""
|
||||
This file is not a real python unittest. It contains small experiments
|
||||
to test the wrapper with gtsam_test, a short version of gtsam.h.
|
||||
Its name convention is different from other tests so it won't be discovered.
|
||||
"""
|
||||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
r = gtsam.Rot3()
|
||||
print(r)
|
||||
print(r.pitch())
|
||||
r2 = gtsam.Rot3()
|
||||
r3 = r.compose(r2)
|
||||
print("r3 pitch:", r3.pitch())
|
||||
|
||||
v = np.array([1, 1, 1])
|
||||
print("v = ", v)
|
||||
r4 = r3.retract(v)
|
||||
print("r4 pitch:", r4.pitch())
|
||||
r4.print_(b'r4: ')
|
||||
r3.print_(b"r3: ")
|
||||
|
||||
v = r3.localCoordinates(r4)
|
||||
print("localCoordinates:", v)
|
||||
|
||||
Rmat = np.array([
|
||||
[0.990074, -0.0942928, 0.104218],
|
||||
[0.104218, 0.990074, -0.0942928],
|
||||
[-0.0942928, 0.104218, 0.990074]
|
||||
])
|
||||
r5 = gtsam.Rot3(Rmat)
|
||||
r5.print_(b"r5: ")
|
||||
|
||||
l = gtsam.Rot3.Logmap(r5)
|
||||
print("l = ", l)
|
||||
|
||||
|
||||
noise = gtsam.noiseModel_Gaussian.Covariance(Rmat)
|
||||
noise.print_(b"noise:")
|
||||
|
||||
D = np.array([1.,2.,3.])
|
||||
diag = gtsam.noiseModel_Diagonal.Variances(D)
|
||||
print("diag:", diag)
|
||||
diag.print_(b"diag:")
|
||||
print("diag R:", diag.R())
|
||||
|
||||
p = gtsam.Point3()
|
||||
p.print_("p:")
|
||||
factor = gtsam.BetweenFactorPoint3(1,2,p, noise)
|
||||
factor.print_(b"factor:")
|
||||
|
||||
vv = gtsam.VectorValues()
|
||||
vv.print_(b"vv:")
|
||||
vv.insert(1, np.array([1.,2.,3.]))
|
||||
vv.insert(2, np.array([3.,4.]))
|
||||
vv.insert(3, np.array([5.,6.,7.,8.]))
|
||||
vv.print_(b"vv:")
|
||||
|
||||
vv2 = gtsam.VectorValues(vv)
|
||||
vv2.insert(4, np.array([4.,2.,1]))
|
||||
vv2.print_(b"vv2:")
|
||||
vv.print_(b"vv:")
|
||||
|
||||
vv.insert(4, np.array([1.,2.,4.]))
|
||||
vv.print_(b"vv:")
|
||||
vv3 = vv.add(vv2)
|
||||
|
||||
vv3.print_(b"vv3:")
|
||||
|
||||
values = gtsam.Values()
|
||||
values.insert(1, gtsam.Point3())
|
||||
values.insert(2, gtsam.Rot3())
|
||||
values.print_(b"values:")
|
||||
|
||||
factor = gtsam.PriorFactorVector(1, np.array([1.,2.,3.]), diag)
|
||||
print "Prior factor vector: ", factor
|
||||
|
||||
|
||||
|
||||
keys = gtsam.KeyVector()
|
||||
|
||||
keys.push_back(1)
|
||||
keys.push_back(2)
|
||||
print 'size: ', keys.size()
|
||||
print keys.at(0)
|
||||
print keys.at(1)
|
||||
|
||||
noise = gtsam.noiseModel_Isotropic.Precision(2, 3.0)
|
||||
noise.print_('noise:')
|
||||
print 'noise print:', noise
|
||||
f = gtsam.JacobianFactor(7, np.ones([2,2]), model=noise, b=np.ones(2))
|
||||
print 'JacobianFactor(7):\n', f
|
||||
print "A = ", f.getA()
|
||||
print "b = ", f.getb()
|
||||
|
||||
f = gtsam.JacobianFactor(np.ones(2))
|
||||
f.print_('jacoboian b_in:')
|
||||
|
||||
|
||||
print "JacobianFactor initalized with b_in:", f
|
||||
|
||||
diag = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.,2.,3.]))
|
||||
fv = gtsam.PriorFactorVector(1, np.array([4.,5.,6.]), diag)
|
||||
print "priorfactorvector: ", fv
|
||||
|
||||
print "base noise: ", fv.get_noiseModel()
|
||||
print "casted to gaussian2: ", gtsam.dynamic_cast_noiseModel_Diagonal_noiseModel_Base(fv.get_noiseModel())
|
||||
|
||||
X = gtsam.symbol(65, 19)
|
||||
print X
|
||||
print gtsam.symbolChr(X)
|
||||
print gtsam.symbolIndex(X)
|
|
@ -1,772 +0,0 @@
|
|||
namespace gtsam {
|
||||
|
||||
#include <gtsam/inference/Key.h>
|
||||
typedef size_t Key;
|
||||
|
||||
#include <gtsam/base/FastVector.h>
|
||||
template<T> class FastVector {
|
||||
FastVector();
|
||||
FastVector(const This& f);
|
||||
void push_back(const T& e);
|
||||
//T& operator[](int);
|
||||
T at(int i);
|
||||
size_t size() const;
|
||||
};
|
||||
|
||||
typedef gtsam::FastVector<gtsam::Key> KeyVector;
|
||||
|
||||
//*************************************************************************
|
||||
// geometry
|
||||
//*************************************************************************
|
||||
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
class Point2 {
|
||||
// Standard Constructors
|
||||
Point2();
|
||||
Point2(double x, double y);
|
||||
Point2(Vector v);
|
||||
//Point2(const gtsam::Point2& l);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Point2& pose, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Point2 identity();
|
||||
|
||||
// Standard Interface
|
||||
double x() const;
|
||||
double y() const;
|
||||
Vector vector() const;
|
||||
double distance(const gtsam::Point2& p2) const;
|
||||
double norm() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
class Point3 {
|
||||
// Standard Constructors
|
||||
Point3();
|
||||
Point3(double x, double y, double z);
|
||||
Point3(Vector v);
|
||||
//Point3(const gtsam::Point3& l);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Point3& p, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Point3 identity();
|
||||
|
||||
// Standard Interface
|
||||
Vector vector() const;
|
||||
double x() const;
|
||||
double y() const;
|
||||
double z() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Rot2.h>
|
||||
class Rot2 {
|
||||
// Standard Constructors and Named Constructors
|
||||
Rot2();
|
||||
Rot2(double theta);
|
||||
//Rot2(const gtsam::Rot2& l);
|
||||
|
||||
static gtsam::Rot2 fromAngle(double theta);
|
||||
static gtsam::Rot2 fromDegrees(double theta);
|
||||
static gtsam::Rot2 fromCosSin(double c, double s);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Rot2& rot, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Rot2 identity();
|
||||
gtsam::Rot2 inverse();
|
||||
gtsam::Rot2 compose(const gtsam::Rot2& p2) const;
|
||||
gtsam::Rot2 between(const gtsam::Rot2& p2) const;
|
||||
|
||||
// Manifold
|
||||
gtsam::Rot2 retract(Vector v) const;
|
||||
Vector localCoordinates(const gtsam::Rot2& p) const;
|
||||
|
||||
// Lie Group
|
||||
static gtsam::Rot2 Expmap(Vector v);
|
||||
static Vector Logmap(const gtsam::Rot2& p);
|
||||
|
||||
// Group Action on Point2
|
||||
gtsam::Point2 rotate(const gtsam::Point2& point) const;
|
||||
gtsam::Point2 unrotate(const gtsam::Point2& point) const;
|
||||
|
||||
// Standard Interface
|
||||
static gtsam::Rot2 relativeBearing(const gtsam::Point2& d); // Ignoring derivative
|
||||
static gtsam::Rot2 atan2(double y, double x);
|
||||
double theta() const;
|
||||
double degrees() const;
|
||||
double c() const;
|
||||
double s() const;
|
||||
Matrix matrix() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Rot3.h>
|
||||
class Rot3 {
|
||||
// Standard Constructors and Named Constructors
|
||||
Rot3();
|
||||
Rot3(Matrix R);
|
||||
//Rot3(const gtsam::Rot3& l);
|
||||
|
||||
static gtsam::Rot3 Rx(double t);
|
||||
static gtsam::Rot3 Ry(double t);
|
||||
static gtsam::Rot3 Rz(double t);
|
||||
static gtsam::Rot3 RzRyRx(double x, double y, double z);
|
||||
static gtsam::Rot3 RzRyRx(Vector xyz);
|
||||
static gtsam::Rot3 Yaw(double t); // positive yaw is to right (as in aircraft heading)
|
||||
static gtsam::Rot3 Pitch(double t); // positive pitch is up (increasing aircraft altitude)
|
||||
static gtsam::Rot3 Roll(double t); // positive roll is to right (increasing yaw in aircraft)
|
||||
static gtsam::Rot3 Ypr(double y, double p, double r);
|
||||
static gtsam::Rot3 Quaternion(double w, double x, double y, double z);
|
||||
static gtsam::Rot3 Rodrigues(Vector v);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Rot3& rot, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Rot3 identity();
|
||||
gtsam::Rot3 inverse() const;
|
||||
gtsam::Rot3 compose(const gtsam::Rot3& p2) const;
|
||||
gtsam::Rot3 between(const gtsam::Rot3& p2) const;
|
||||
|
||||
// Manifold
|
||||
//gtsam::Rot3 retractCayley(Vector v) const; // FIXME, does not exist in both Matrix and Quaternion options
|
||||
gtsam::Rot3 retract(Vector v) const;
|
||||
Vector localCoordinates(const gtsam::Rot3& p) const;
|
||||
|
||||
// Group Action on Point3
|
||||
gtsam::Point3 rotate(const gtsam::Point3& p) const;
|
||||
gtsam::Point3 unrotate(const gtsam::Point3& p) const;
|
||||
|
||||
// Standard Interface
|
||||
static gtsam::Rot3 Expmap(Vector v);
|
||||
static Vector Logmap(const gtsam::Rot3& p);
|
||||
Matrix matrix() const;
|
||||
Matrix transpose() const;
|
||||
gtsam::Point3 column(size_t index) const;
|
||||
Vector xyz() const;
|
||||
Vector ypr() const;
|
||||
Vector rpy() const;
|
||||
double roll() const;
|
||||
double pitch() const;
|
||||
double yaw() const;
|
||||
// Vector toQuaternion() const; // FIXME: Can't cast to Vector properly
|
||||
Vector quaternion() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
class Pose2 {
|
||||
// Standard Constructor
|
||||
Pose2();
|
||||
//Pose2(const gtsam::Pose2& pose);
|
||||
Pose2(double x, double y, double theta);
|
||||
Pose2(double theta, const gtsam::Point2& t);
|
||||
Pose2(const gtsam::Rot2& r, const gtsam::Point2& t);
|
||||
Pose2(Vector v);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Pose2& pose, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Pose2 identity();
|
||||
gtsam::Pose2 inverse() const;
|
||||
gtsam::Pose2 compose(const gtsam::Pose2& p2) const;
|
||||
gtsam::Pose2 between(const gtsam::Pose2& p2) const;
|
||||
|
||||
// Manifold
|
||||
gtsam::Pose2 retract(Vector v) const;
|
||||
Vector localCoordinates(const gtsam::Pose2& p) const;
|
||||
|
||||
// Lie Group
|
||||
static gtsam::Pose2 Expmap(Vector v);
|
||||
static Vector Logmap(const gtsam::Pose2& p);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(const Vector& xi) const;
|
||||
static Matrix wedge(double vx, double vy, double w);
|
||||
|
||||
// Group Actions on Point2
|
||||
gtsam::Point2 transform_from(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 transform_to(const gtsam::Point2& p) const;
|
||||
|
||||
// Standard Interface
|
||||
double x() const;
|
||||
double y() const;
|
||||
double theta() const;
|
||||
gtsam::Rot2 bearing(const gtsam::Point2& point) const;
|
||||
double range(const gtsam::Point2& point) const;
|
||||
gtsam::Point2 translation() const;
|
||||
gtsam::Rot2 rotation() const;
|
||||
Matrix matrix() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
class Pose3 {
|
||||
// Standard Constructors
|
||||
Pose3();
|
||||
//Pose3(const gtsam::Pose3& pose);
|
||||
Pose3(const gtsam::Rot3& r, const gtsam::Point3& t);
|
||||
Pose3(const gtsam::Pose2& pose2); // FIXME: shadows Pose3(Pose3 pose)
|
||||
Pose3(Matrix t);
|
||||
|
||||
// Testable
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Pose3& pose, double tol) const;
|
||||
|
||||
// Group
|
||||
static gtsam::Pose3 identity();
|
||||
gtsam::Pose3 inverse() const;
|
||||
gtsam::Pose3 compose(const gtsam::Pose3& p2) const;
|
||||
gtsam::Pose3 between(const gtsam::Pose3& p2) const;
|
||||
|
||||
// Manifold
|
||||
gtsam::Pose3 retract(Vector v) const;
|
||||
Vector localCoordinates(const gtsam::Pose3& T2) const;
|
||||
|
||||
// Lie Group
|
||||
static gtsam::Pose3 Expmap(Vector v);
|
||||
static Vector Logmap(const gtsam::Pose3& p);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(Vector xi) const;
|
||||
static Matrix wedge(double wx, double wy, double wz, double vx, double vy, double vz);
|
||||
|
||||
// Group Action on Point3
|
||||
gtsam::Point3 transform_from(const gtsam::Point3& p) const;
|
||||
gtsam::Point3 transform_to(const gtsam::Point3& p) const;
|
||||
|
||||
// Standard Interface
|
||||
gtsam::Rot3 rotation() const;
|
||||
gtsam::Point3 translation() const;
|
||||
double x() const;
|
||||
double y() const;
|
||||
double z() const;
|
||||
Matrix matrix() const;
|
||||
gtsam::Pose3 transform_to(const gtsam::Pose3& pose) const; // FIXME: shadows other transform_to()
|
||||
double range(const gtsam::Point3& point);
|
||||
double range(const gtsam::Pose3& pose);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
//*************************************************************************
|
||||
// noise
|
||||
//*************************************************************************
|
||||
|
||||
namespace noiseModel {
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
virtual class Base {
|
||||
};
|
||||
|
||||
virtual class Gaussian : gtsam::noiseModel::Base {
|
||||
static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R);
|
||||
static gtsam::noiseModel::Gaussian* Covariance(Matrix R);
|
||||
Matrix R() const;
|
||||
bool equals(gtsam::noiseModel::Base& expected, double tol);
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Diagonal : gtsam::noiseModel::Gaussian {
|
||||
static gtsam::noiseModel::Diagonal* Sigmas(Vector sigmas);
|
||||
static gtsam::noiseModel::Diagonal* Variances(Vector variances);
|
||||
static gtsam::noiseModel::Diagonal* Precisions(Vector precisions);
|
||||
Matrix R() const;
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Constrained : gtsam::noiseModel::Diagonal {
|
||||
static gtsam::noiseModel::Constrained* MixedSigmas(const Vector& mu, const Vector& sigmas);
|
||||
static gtsam::noiseModel::Constrained* MixedSigmas(double m, const Vector& sigmas);
|
||||
static gtsam::noiseModel::Constrained* MixedVariances(const Vector& mu, const Vector& variances);
|
||||
static gtsam::noiseModel::Constrained* MixedVariances(const Vector& variances);
|
||||
static gtsam::noiseModel::Constrained* MixedPrecisions(const Vector& mu, const Vector& precisions);
|
||||
static gtsam::noiseModel::Constrained* MixedPrecisions(const Vector& precisions);
|
||||
|
||||
static gtsam::noiseModel::Constrained* All(size_t dim);
|
||||
static gtsam::noiseModel::Constrained* All(size_t dim, double mu);
|
||||
|
||||
gtsam::noiseModel::Constrained* unit() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
||||
static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma);
|
||||
static gtsam::noiseModel::Isotropic* Variance(size_t dim, double varianace);
|
||||
static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision);
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Unit : gtsam::noiseModel::Isotropic {
|
||||
static gtsam::noiseModel::Unit* Create(size_t dim);
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
namespace mEstimator {
|
||||
virtual class Base {
|
||||
};
|
||||
|
||||
virtual class Null: gtsam::noiseModel::mEstimator::Base {
|
||||
Null();
|
||||
//Null(const gtsam::noiseModel::mEstimator::Null& other);
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Null* Create();
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
||||
Fair(double c);
|
||||
//Fair(const gtsam::noiseModel::mEstimator::Fair& other);
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Fair* Create(double c);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
||||
Huber(double k);
|
||||
//Huber(const gtsam::noiseModel::mEstimator::Huber& other);
|
||||
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Huber* Create(double k);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
|
||||
Tukey(double k);
|
||||
//Tukey(const gtsam::noiseModel::mEstimator::Tukey& other);
|
||||
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Tukey* Create(double k);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
}///\namespace mEstimator
|
||||
|
||||
virtual class Robust : gtsam::noiseModel::Base {
|
||||
Robust(const gtsam::noiseModel::mEstimator::Base* robust, const gtsam::noiseModel::Base* noise);
|
||||
//Robust(const gtsam::noiseModel::Robust& other);
|
||||
|
||||
static gtsam::noiseModel::Robust* Create(const gtsam::noiseModel::mEstimator::Base* robust, const gtsam::noiseModel::Base* noise);
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
};
|
||||
|
||||
}///\namespace noiseModel
|
||||
|
||||
#include <gtsam/linear/Sampler.h>
|
||||
class Sampler {
|
||||
//Constructors
|
||||
Sampler(gtsam::noiseModel::Diagonal* model, int seed);
|
||||
Sampler(Vector sigmas, int seed);
|
||||
Sampler(int seed);
|
||||
//Sampler(const gtsam::Sampler& other);
|
||||
|
||||
|
||||
//Standard Interface
|
||||
size_t dim() const;
|
||||
Vector sigmas() const;
|
||||
gtsam::noiseModel::Diagonal* model() const;
|
||||
Vector sample();
|
||||
Vector sampleNewModel(gtsam::noiseModel::Diagonal* model);
|
||||
};
|
||||
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
class VectorValues {
|
||||
//Constructors
|
||||
VectorValues();
|
||||
VectorValues(const gtsam::VectorValues& other);
|
||||
|
||||
//Named Constructors
|
||||
static gtsam::VectorValues Zero(const gtsam::VectorValues& model);
|
||||
|
||||
//Standard Interface
|
||||
size_t size() const;
|
||||
size_t dim(size_t j) const;
|
||||
bool exists(size_t j) const;
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::VectorValues& expected, double tol) const;
|
||||
void insert(size_t j, Vector value);
|
||||
Vector vector() const;
|
||||
Vector at(size_t j) const;
|
||||
void update(const gtsam::VectorValues& values);
|
||||
|
||||
//Advanced Interface
|
||||
void setZero();
|
||||
|
||||
gtsam::VectorValues add(const gtsam::VectorValues& c) const;
|
||||
void addInPlace(const gtsam::VectorValues& c);
|
||||
gtsam::VectorValues subtract(const gtsam::VectorValues& c) const;
|
||||
gtsam::VectorValues scale(double a) const;
|
||||
void scaleInPlace(double a);
|
||||
|
||||
bool hasSameStructure(const gtsam::VectorValues& other) const;
|
||||
double dot(const gtsam::VectorValues& V) const;
|
||||
double norm() const;
|
||||
double squaredNorm() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/GaussianFactor.h>
|
||||
virtual class GaussianFactor {
|
||||
gtsam::KeyVector keys() const;
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::GaussianFactor& lf, double tol) const;
|
||||
double error(const gtsam::VectorValues& c) const;
|
||||
gtsam::GaussianFactor* clone() const;
|
||||
gtsam::GaussianFactor* negate() const;
|
||||
Matrix augmentedInformation() const;
|
||||
Matrix information() const;
|
||||
Matrix augmentedJacobian() const;
|
||||
pair<Matrix, Vector> jacobian() const;
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
virtual class JacobianFactor : gtsam::GaussianFactor {
|
||||
//Constructors
|
||||
JacobianFactor();
|
||||
JacobianFactor(const gtsam::GaussianFactor& factor);
|
||||
JacobianFactor(Vector b_in);
|
||||
JacobianFactor(size_t i1, Matrix A1, Vector b,
|
||||
const gtsam::noiseModel::Diagonal* model);
|
||||
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, Vector b,
|
||||
const gtsam::noiseModel::Diagonal* model);
|
||||
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
|
||||
Vector b, const gtsam::noiseModel::Diagonal* model);
|
||||
//JacobianFactor(const gtsam::GaussianFactorGraph& graph);
|
||||
//JacobianFactor(const gtsam::JacobianFactor& other);
|
||||
|
||||
//Testable
|
||||
void print(string s) const;
|
||||
void printKeys(string s) const;
|
||||
bool equals(const gtsam::GaussianFactor& lf, double tol) const;
|
||||
size_t size() const;
|
||||
Vector unweighted_error(const gtsam::VectorValues& c) const;
|
||||
Vector error_vector(const gtsam::VectorValues& c) const;
|
||||
double error(const gtsam::VectorValues& c) const;
|
||||
|
||||
//Standard Interface
|
||||
Matrix getA() const;
|
||||
Vector getb() const;
|
||||
size_t rows() const;
|
||||
size_t cols() const;
|
||||
bool isConstrained() const;
|
||||
pair<Matrix, Vector> jacobianUnweighted() const;
|
||||
Matrix augmentedJacobianUnweighted() const;
|
||||
|
||||
void transposeMultiplyAdd(double alpha, const Vector& e, gtsam::VectorValues& x) const;
|
||||
gtsam::JacobianFactor whiten() const;
|
||||
|
||||
//pair<gtsam::GaussianConditional*, gtsam::JacobianFactor*> eliminate(const gtsam::Ordering& keys) const;
|
||||
|
||||
void setModel(bool anyConstrained, const Vector& sigmas);
|
||||
|
||||
gtsam::noiseModel::Diagonal* get_model() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
virtual class HessianFactor : gtsam::GaussianFactor {
|
||||
//Constructors
|
||||
HessianFactor();
|
||||
HessianFactor(const gtsam::GaussianFactor& factor);
|
||||
HessianFactor(size_t j, Matrix G, Vector g, double f);
|
||||
HessianFactor(size_t j, Vector mu, Matrix Sigma);
|
||||
HessianFactor(size_t j1, size_t j2, Matrix G11, Matrix G12, Vector g1, Matrix G22,
|
||||
Vector g2, double f);
|
||||
HessianFactor(size_t j1, size_t j2, size_t j3, Matrix G11, Matrix G12, Matrix G13,
|
||||
Vector g1, Matrix G22, Matrix G23, Vector g2, Matrix G33, Vector g3,
|
||||
double f);
|
||||
//HessianFactor(const gtsam::GaussianFactorGraph& factors);
|
||||
//HessianFactor(const gtsam::HessianFactor& other);
|
||||
|
||||
//Testable
|
||||
size_t size() const;
|
||||
void print(string s) const;
|
||||
void printKeys(string s) const;
|
||||
bool equals(const gtsam::GaussianFactor& lf, double tol) const;
|
||||
double error(const gtsam::VectorValues& c) const;
|
||||
|
||||
//Standard Interface
|
||||
size_t rows() const;
|
||||
Matrix information() const;
|
||||
double constantTerm() const;
|
||||
Vector linearTerm() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
class Values {
|
||||
Values();
|
||||
//Values(const gtsam::Values& other);
|
||||
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
void clear();
|
||||
size_t dim() const;
|
||||
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::Values& other, double tol) const;
|
||||
|
||||
void insert(const gtsam::Values& values);
|
||||
void update(const gtsam::Values& values);
|
||||
void erase(size_t j);
|
||||
void swap(gtsam::Values& values);
|
||||
|
||||
bool exists(size_t j) const;
|
||||
gtsam::KeyVector keys() const;
|
||||
|
||||
gtsam::VectorValues zeroVectors() const;
|
||||
|
||||
gtsam::Values retract(const gtsam::VectorValues& delta) const;
|
||||
gtsam::VectorValues localCoordinates(const gtsam::Values& cp) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// New in 4.0, we have to specialize every insert/update/at to generate wrappers
|
||||
// Instead of the old:
|
||||
// void insert(size_t j, const gtsam::Value& value);
|
||||
// void update(size_t j, const gtsam::Value& val);
|
||||
// gtsam::Value at(size_t j) const;
|
||||
|
||||
// template <T = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3,
|
||||
// gtsam::Rot3, gtsam::Pose3}>
|
||||
// void insert(size_t j, const T& t);
|
||||
|
||||
// template <T = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3,
|
||||
// gtsam::Rot3, gtsam::Pose3}>
|
||||
// void update(size_t j, const T& t);
|
||||
void insert(size_t j, const gtsam::Point2& t);
|
||||
void insert(size_t j, const gtsam::Point3& t);
|
||||
void insert(size_t j, const gtsam::Rot2& t);
|
||||
void insert(size_t j, const gtsam::Pose2& t);
|
||||
void insert(size_t j, const gtsam::Rot3& t);
|
||||
void insert(size_t j, const gtsam::Pose3& t);
|
||||
void insert(size_t j, Vector t);
|
||||
void insert(size_t j, Matrix t);
|
||||
|
||||
void update(size_t j, const gtsam::Point2& t);
|
||||
void update(size_t j, const gtsam::Point3& t);
|
||||
void update(size_t j, const gtsam::Rot2& t);
|
||||
void update(size_t j, const gtsam::Pose2& t);
|
||||
void update(size_t j, const gtsam::Rot3& t);
|
||||
void update(size_t j, const gtsam::Pose3& t);
|
||||
void update(size_t j, Vector t);
|
||||
void update(size_t j, Matrix t);
|
||||
|
||||
template <T = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3,
|
||||
gtsam::Rot3, gtsam::Pose3, Vector, Matrix}>
|
||||
T at(size_t j);
|
||||
|
||||
/// version for double
|
||||
void insertDouble(size_t j, double c);
|
||||
double atDouble(size_t j) const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
virtual class NonlinearFactor {
|
||||
// Factor base class
|
||||
size_t size() const;
|
||||
gtsam::KeyVector keys() const;
|
||||
void print(string s) const;
|
||||
void printKeys(string s) const;
|
||||
// NonlinearFactor
|
||||
bool equals(const gtsam::NonlinearFactor& other, double tol) const;
|
||||
|
||||
double error(const gtsam::Values& c) const;
|
||||
size_t dim() const;
|
||||
bool active(const gtsam::Values& c) const;
|
||||
gtsam::GaussianFactor* linearize(const gtsam::Values& c) const;
|
||||
gtsam::NonlinearFactor* clone() const;
|
||||
// gtsam::NonlinearFactor* rekey(const gtsam::KeyVector& newKeys) const; //FIXME: Conversion from KeyVector to std::vector does not happen
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
class NonlinearFactorGraph {
|
||||
NonlinearFactorGraph();
|
||||
//NonlinearFactorGraph(const gtsam::NonlinearFactorGraph& graph);
|
||||
|
||||
// FactorGraph
|
||||
void print(string s) const;
|
||||
bool equals(const gtsam::NonlinearFactorGraph& fg, double tol) const;
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
void remove(size_t i);
|
||||
size_t nrFactors() const;
|
||||
gtsam::NonlinearFactor* at(size_t idx) const;
|
||||
void push_back(const gtsam::NonlinearFactorGraph& factors);
|
||||
void push_back(gtsam::NonlinearFactor* factor);
|
||||
void add(gtsam::NonlinearFactor* factor);
|
||||
bool exists(size_t idx) const;
|
||||
// gtsam::KeySet keys() const;
|
||||
|
||||
// NonlinearFactorGraph
|
||||
double error(const gtsam::Values& values) const;
|
||||
double probPrime(const gtsam::Values& values) const;
|
||||
//gtsam::Ordering orderingCOLAMD() const;
|
||||
// Ordering* orderingCOLAMDConstrained(const gtsam::Values& c, const std::map<gtsam::Key,int>& constraints) const;
|
||||
//gtsam::GaussianFactorGraph* linearize(const gtsam::Values& values) const;
|
||||
gtsam::NonlinearFactorGraph clone() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
virtual class NoiseModelFactor: gtsam::NonlinearFactor {
|
||||
void equals(const gtsam::NoiseModelFactor& other, double tol) const;
|
||||
gtsam::noiseModel::Base* get_noiseModel() const; // deprecated by below
|
||||
gtsam::noiseModel::Base* noiseModel() const;
|
||||
Vector unwhitenedError(const gtsam::Values& x) const;
|
||||
Vector whitenedError(const gtsam::Values& x) const;
|
||||
};
|
||||
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3}>
|
||||
virtual class PriorFactor : gtsam::NoiseModelFactor {
|
||||
PriorFactor(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel);
|
||||
//PriorFactor(const This& other);
|
||||
T prior() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3}>
|
||||
virtual class BetweenFactor : gtsam::NoiseModelFactor {
|
||||
BetweenFactor(size_t key1, size_t key2, const T& relativePose, const gtsam::noiseModel::Base* noiseModel);
|
||||
//BetweenFactor(const This& other);
|
||||
T measured() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
size_t symbol(char chr, size_t index);
|
||||
char symbolChr(size_t key);
|
||||
size_t symbolIndex(size_t key);
|
||||
|
||||
#include <gtsam/inference/Key.h>
|
||||
// Default keyformatter
|
||||
void PrintKeyVector(const gtsam::KeyVector& keys);
|
||||
void PrintKeyVector(const gtsam::KeyVector& keys, string s);
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearOptimizer.h>
|
||||
bool checkConvergence(double relativeErrorTreshold,
|
||||
double absoluteErrorTreshold, double errorThreshold,
|
||||
double currentError, double newError);
|
||||
|
||||
#include <gtsam/slam/dataset.h>
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
|
||||
gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
|
||||
gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
|
||||
gtsam::noiseModel::Diagonal* model, int maxID);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename,
|
||||
gtsam::noiseModel::Diagonal* model);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D(string filename);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load2D_robust(string filename,
|
||||
gtsam::noiseModel::Base* model);
|
||||
void save2D(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& config, gtsam::noiseModel::Diagonal* model,
|
||||
string filename);
|
||||
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename);
|
||||
void writeG2o(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& estimate, string filename);
|
||||
|
||||
//*************************************************************************
|
||||
// Utilities
|
||||
//*************************************************************************
|
||||
|
||||
namespace utilities {
|
||||
|
||||
#include <gtsam/nonlinear/utilities.h>
|
||||
// gtsam::KeyList createKeyList(Vector I);
|
||||
// gtsam::KeyList createKeyList(string s, Vector I);
|
||||
gtsam::KeyVector createKeyVector(Vector I);
|
||||
gtsam::KeyVector createKeyVector(string s, Vector I);
|
||||
// gtsam::KeySet createKeySet(Vector I);
|
||||
// gtsam::KeySet createKeySet(string s, Vector I);
|
||||
Matrix extractPoint2(const gtsam::Values& values);
|
||||
Matrix extractPoint3(const gtsam::Values& values);
|
||||
Matrix extractPose2(const gtsam::Values& values);
|
||||
gtsam::Values allPose3s(gtsam::Values& values);
|
||||
Matrix extractPose3(const gtsam::Values& values);
|
||||
void perturbPoint2(gtsam::Values& values, double sigma, int seed);
|
||||
void perturbPose2 (gtsam::Values& values, double sigmaT, double sigmaR, int seed);
|
||||
void perturbPoint3(gtsam::Values& values, double sigma, int seed);
|
||||
// void insertBackprojections(gtsam::Values& values, const gtsam::SimpleCamera& c, Vector J, Matrix Z, double depth);
|
||||
// void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i, Vector J, Matrix Z, const gtsam::noiseModel::Base* model, const gtsam::Cal3_S2* K);
|
||||
// void insertProjectionFactors(gtsam::NonlinearFactorGraph& graph, size_t i, Vector J, Matrix Z, const gtsam::noiseModel::Base* model, const gtsam::Cal3_S2* K, const gtsam::Pose3& body_P_sensor);
|
||||
Matrix reprojectionErrors(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& values);
|
||||
gtsam::Values localToWorld(const gtsam::Values& local, const gtsam::Pose2& base);
|
||||
gtsam::Values localToWorld(const gtsam::Values& local, const gtsam::Pose2& base, const gtsam::KeyVector& keys);
|
||||
|
||||
} //\namespace utilities
|
||||
|
||||
#include <gtsam/nonlinear/utilities.h>
|
||||
class RedirectCout {
|
||||
RedirectCout();
|
||||
string str();
|
||||
};
|
||||
|
||||
} //\namespace gtsam
|
|
@ -1,9 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Cal3Unified unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
class TestCal3Unified(unittest.TestCase):
|
||||
|
||||
class TestCal3Unified(GtsamTestCase):
|
||||
|
||||
def test_Cal3Unified(self):
|
||||
K = gtsam.Cal3Unified()
|
||||
|
@ -11,12 +24,15 @@ class TestCal3Unified(unittest.TestCase):
|
|||
self.assertEqual(K.fx(), 1.)
|
||||
|
||||
def test_retract(self):
|
||||
expected = gtsam.Cal3Unified(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6, 1e-3 + 7, 2.0*1e-3 + 8, 3.0*1e-3 + 9, 4.0*1e-3 + 10, 0.1 + 1)
|
||||
K = gtsam.Cal3Unified(100, 105, 0.0, 320, 240, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0.1)
|
||||
expected = gtsam.Cal3Unified(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6,
|
||||
1e-3 + 7, 2.0*1e-3 + 8, 3.0*1e-3 + 9, 4.0*1e-3 + 10, 0.1 + 1)
|
||||
K = gtsam.Cal3Unified(100, 105, 0.0, 320, 240,
|
||||
1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0.1)
|
||||
d = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 1], order='F')
|
||||
actual = K.retract(d)
|
||||
self.assertTrue(actual.equals(expected, 1e-9))
|
||||
self.gtsamAssertEquals(actual, expected)
|
||||
np.testing.assert_allclose(d, K.localCoordinates(actual))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,8 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
JacobianFactor unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestJacobianFactor(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestJacobianFactor(GtsamTestCase):
|
||||
|
||||
def test_eliminate(self):
|
||||
# Recommended way to specify a matrix (see cython/README)
|
||||
|
@ -54,7 +68,7 @@ class TestJacobianFactor(unittest.TestCase):
|
|||
expectedCG = gtsam.GaussianConditional(
|
||||
x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2))
|
||||
# check if the result matches
|
||||
self.assertTrue(actualCG.equals(expectedCG, 1e-4))
|
||||
self.gtsamAssertEquals(actualCG, expectedCG, 1e-4)
|
||||
|
||||
# the expected linear factor
|
||||
Bl1 = np.array([[4.47214, 0.00],
|
||||
|
@ -72,7 +86,7 @@ class TestJacobianFactor(unittest.TestCase):
|
|||
expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2)
|
||||
|
||||
# check if the result matches the combined (reduced) factor
|
||||
self.assertTrue(lf.equals(expectedLF, 1e-4))
|
||||
self.gtsamAssertEquals(lf, expectedLF, 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,8 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
KalmanFilter unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestKalmanFilter(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestKalmanFilter(GtsamTestCase):
|
||||
|
||||
def test_KalmanFilter(self):
|
||||
F = np.eye(2)
|
||||
|
|
|
@ -1,8 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Localization unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestLocalizationExample(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestLocalizationExample(GtsamTestCase):
|
||||
|
||||
def test_LocalizationExample(self):
|
||||
# Create the graph (defined in pose2SLAM.h, derived from
|
||||
|
@ -43,7 +57,7 @@ class TestLocalizationExample(unittest.TestCase):
|
|||
P = [None] * result.size()
|
||||
for i in range(0, result.size()):
|
||||
pose_i = result.atPose2(i)
|
||||
self.assertTrue(pose_i.equals(groundTruth.atPose2(i), 1e-4))
|
||||
self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4)
|
||||
P[i] = marginals.marginalCovariance(i)
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for IMU testing scenarios.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
|
||||
GaussNewtonParams, LevenbergMarquardtOptimizer,
|
||||
LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
|
||||
Point2, PriorFactorPoint2, Values)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
KEY1 = 1
|
||||
KEY2 = 2
|
||||
|
||||
|
||||
class TestScenario(GtsamTestCase):
|
||||
def test_optimize(self):
|
||||
"""Do trivial test with three optimizer variants."""
|
||||
fg = NonlinearFactorGraph()
|
||||
model = gtsam.noiseModel_Unit.Create(2)
|
||||
fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))
|
||||
|
||||
# test error at minimum
|
||||
xstar = Point2(0, 0)
|
||||
optimal_values = Values()
|
||||
optimal_values.insert(KEY1, xstar)
|
||||
self.assertAlmostEqual(0.0, fg.error(optimal_values))
|
||||
|
||||
# test error at initial estimate: 0.5 * ||(3,3)||^2 = 9.0
|
||||
x0 = Point2(3, 3)
|
||||
initial_values = Values()
|
||||
initial_values.insert(KEY1, x0)
|
||||
self.assertAlmostEqual(9.0, fg.error(initial_values), delta=1e-3)
|
||||
|
||||
# optimize parameters
|
||||
ordering = Ordering()
|
||||
ordering.push_back(KEY1)
|
||||
|
||||
# Gauss-Newton
|
||||
gnParams = GaussNewtonParams()
|
||||
gnParams.setOrdering(ordering)
|
||||
actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
|
||||
self.assertAlmostEqual(0, fg.error(actual1))
|
||||
|
||||
# Levenberg-Marquardt
|
||||
lmParams = LevenbergMarquardtParams.CeresDefaults()
|
||||
lmParams.setOrdering(ordering)
|
||||
actual2 = LevenbergMarquardtOptimizer(
|
||||
fg, initial_values, lmParams).optimize()
|
||||
self.assertAlmostEqual(0, fg.error(actual2))
|
||||
|
||||
# Dogleg
|
||||
dlParams = DoglegParams()
|
||||
dlParams.setOrdering(ordering)
|
||||
actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
|
||||
self.assertAlmostEqual(0, fg.error(actual3))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -1,8 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Odometry unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestOdometryExample(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestOdometryExample(GtsamTestCase):
|
||||
|
||||
def test_OdometryExample(self):
|
||||
# Create the graph (defined in pose2SLAM.h, derived from
|
||||
|
@ -39,7 +53,7 @@ class TestOdometryExample(unittest.TestCase):
|
|||
|
||||
# Check first pose equality
|
||||
pose_1 = result.atPose2(1)
|
||||
self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,11 +1,25 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
PlanarSLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestPose2SLAMExample(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
def test_Pose2SLAMExample(self):
|
||||
|
||||
class TestPlanarSLAM(GtsamTestCase):
|
||||
|
||||
def test_PlanarSLAM(self):
|
||||
# Assumptions
|
||||
# - All values are axis aligned
|
||||
# - Robot poses are facing along the X axis (horizontal, to the right in images)
|
||||
|
@ -56,7 +70,7 @@ class TestPose2SLAMExample(unittest.TestCase):
|
|||
P = marginals.marginalCovariance(1)
|
||||
|
||||
pose_1 = result.atPose2(1)
|
||||
self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose2 unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Pose2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose2(GtsamTestCase):
|
||||
"""Test selected Pose2 methods."""
|
||||
|
||||
def test_adjoint(self):
|
||||
"""Test adjoint method."""
|
||||
xi = np.array([1, 2, 3])
|
||||
expected = np.dot(Pose2.adjointMap_(xi), xi)
|
||||
actual = Pose2.adjoint_(xi, xi)
|
||||
np.testing.assert_array_equal(actual, expected)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -1,9 +1,23 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose2SLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestPose2SLAMExample(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose2SLAMExample(GtsamTestCase):
|
||||
|
||||
def test_Pose2SLAMExample(self):
|
||||
# Assumptions
|
||||
|
@ -56,7 +70,7 @@ class TestPose2SLAMExample(unittest.TestCase):
|
|||
P = marginals.marginalCovariance(1)
|
||||
|
||||
pose_1 = result.atPose2(1)
|
||||
self.assertTrue(pose_1.equals(gtsam.Pose2(), 1e-4))
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,13 +1,24 @@
|
|||
"""Pose3 unit tests."""
|
||||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose3 unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose3, Rot3
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose3(unittest.TestCase):
|
||||
class TestPose3(GtsamTestCase):
|
||||
"""Test selected Pose3 methods."""
|
||||
|
||||
def test_between(self):
|
||||
|
@ -16,14 +27,14 @@ class TestPose3(unittest.TestCase):
|
|||
T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
|
||||
expected = T2.inverse().compose(T3)
|
||||
actual = T2.between(T3)
|
||||
self.assertTrue(actual.equals(expected, 1e-6))
|
||||
self.gtsamAssertEquals(actual, expected, 1e-6)
|
||||
|
||||
def test_transform_to(self):
|
||||
"""Test transform_to method."""
|
||||
transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
|
||||
actual = transform.transform_to(Point3(3, 2, 10))
|
||||
expected = Point3(2, 1, 10)
|
||||
self.assertTrue(actual.equals(expected, 1e-6))
|
||||
self.gtsamAssertEquals(actual, expected, 1e-6)
|
||||
|
||||
def test_range(self):
|
||||
"""Test range method."""
|
||||
|
@ -49,8 +60,8 @@ class TestPose3(unittest.TestCase):
|
|||
def test_adjoint(self):
|
||||
"""Test adjoint method."""
|
||||
xi = np.array([1, 2, 3, 4, 5, 6])
|
||||
expected = np.dot(Pose3.adjointMap(xi), xi)
|
||||
actual = Pose3.adjoint(xi, xi)
|
||||
expected = np.dot(Pose3.adjointMap_(xi), xi)
|
||||
actual = Pose3.adjoint_(xi, xi)
|
||||
np.testing.assert_array_equal(actual, expected)
|
||||
|
||||
|
||||
|
|
|
@ -1,10 +1,24 @@
|
|||
import unittest
|
||||
import numpy as np
|
||||
import gtsam
|
||||
from math import pi
|
||||
from gtsam.utils.circlePose3 import *
|
||||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
class TestPose3SLAMExample(unittest.TestCase):
|
||||
See LICENSE for the license information
|
||||
|
||||
PoseSLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
from gtsam.utils.circlePose3 import *
|
||||
|
||||
|
||||
class TestPose3SLAMExample(GtsamTestCase):
|
||||
|
||||
def test_Pose3SLAMExample(self):
|
||||
# Create a hexagon of poses
|
||||
|
@ -40,7 +54,7 @@ class TestPose3SLAMExample(unittest.TestCase):
|
|||
result = optimizer.optimizeSafely()
|
||||
|
||||
pose_1 = result.atPose3(1)
|
||||
self.assertTrue(pose_1.equals(p1, 1e-4))
|
||||
self.gtsamAssertEquals(pose_1, p1, 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,8 +1,22 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
PriorFactor unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
|
||||
import numpy as np
|
||||
|
||||
class TestPriorFactor(unittest.TestCase):
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPriorFactor(GtsamTestCase):
|
||||
|
||||
def test_PriorFactor(self):
|
||||
values = gtsam.Values()
|
||||
|
|
|
@ -1,11 +1,24 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
SFM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
from gtsam import symbol
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.visual_data_generator as generator
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestSFMExample(unittest.TestCase):
|
||||
class TestSFMExample(GtsamTestCase):
|
||||
|
||||
def test_SFMExample(self):
|
||||
options = generator.Options()
|
||||
|
@ -59,11 +72,11 @@ class TestSFMExample(unittest.TestCase):
|
|||
# Check optimized results, should be equal to ground truth
|
||||
for i in range(len(truth.cameras)):
|
||||
pose_i = result.atPose3(symbol(ord('x'), i))
|
||||
self.assertTrue(pose_i.equals(truth.cameras[i].pose(), 1e-5))
|
||||
self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
|
||||
|
||||
for j in range(len(truth.points)):
|
||||
point_j = result.atPoint3(symbol(ord('p'), j))
|
||||
self.assertTrue(point_j.equals(truth.points[j], 1e-5))
|
||||
self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,11 +1,27 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Scenario unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
|
||||
class TestScenario(unittest.TestCase):
|
||||
class TestScenario(GtsamTestCase):
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
|
@ -29,7 +45,8 @@ class TestScenario(unittest.TestCase):
|
|||
T30 = scenario.pose(T)
|
||||
np.testing.assert_almost_equal(
|
||||
np.array([math.pi, 0, math.pi]), T30.rotation().xyz())
|
||||
self.assert_(gtsam.Point3(0, 0, 2 * R).equals(T30.translation(), 1e-9))
|
||||
self.gtsamAssertEquals(gtsam.Point3(
|
||||
0, 0, 2.0 * R), T30.translation(), 1e-9)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -1,18 +1,31 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
SimpleCamera unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import math
|
||||
import numpy as np
|
||||
import unittest
|
||||
|
||||
from gtsam import Pose2, Point3, Rot3, Pose3, Cal3_S2, SimpleCamera
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
K = Cal3_S2(625, 625, 0, 0, 0)
|
||||
|
||||
class TestSimpleCamera(unittest.TestCase):
|
||||
class TestSimpleCamera(GtsamTestCase):
|
||||
|
||||
def test_constructor(self):
|
||||
pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5))
|
||||
camera = SimpleCamera(pose1, K)
|
||||
self.assertTrue(camera.calibration().equals(K, 1e-9))
|
||||
self.assertTrue(camera.pose().equals(pose1, 1e-9))
|
||||
self.gtsamAssertEquals(camera.calibration(), K, 1e-9)
|
||||
self.gtsamAssertEquals(camera.pose(), pose1, 1e-9)
|
||||
|
||||
def test_level2(self):
|
||||
# Create a level camera, looking in Y-direction
|
||||
|
@ -25,7 +38,7 @@ class TestSimpleCamera(unittest.TestCase):
|
|||
z = Point3(0,1,0)
|
||||
wRc = Rot3(x,y,z)
|
||||
expected = Pose3(wRc,Point3(0.4,0.3,0.1))
|
||||
self.assertTrue(camera.pose().equals(expected, 1e-9))
|
||||
self.gtsamAssertEquals(camera.pose(), expected, 1e-9)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,10 +1,23 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Stereo VO unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
import gtsam
|
||||
from gtsam import symbol
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
class TestStereoVOExample(unittest.TestCase):
|
||||
|
||||
class TestStereoVOExample(GtsamTestCase):
|
||||
|
||||
def test_StereoVOExample(self):
|
||||
## Assumptions
|
||||
|
@ -60,10 +73,10 @@ class TestStereoVOExample(unittest.TestCase):
|
|||
|
||||
## check equality for the first pose and point
|
||||
pose_x1 = result.atPose3(x1)
|
||||
self.assertTrue(pose_x1.equals(first_pose,1e-4))
|
||||
self.gtsamAssertEquals(pose_x1, first_pose,1e-4)
|
||||
|
||||
point_l1 = result.atPoint3(l1)
|
||||
self.assertTrue(point_l1.equals(expected_l1,1e-4))
|
||||
self.gtsamAssertEquals(point_l1, expected_l1,1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -1,19 +1,34 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Values unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101, E0611
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
from gtsam import Point2, Point3, Unit3, Rot2, Pose2, Rot3, Pose3
|
||||
from gtsam import Values, Cal3_S2, Cal3DS2, Cal3Bundler, EssentialMatrix, imuBias_ConstantBias
|
||||
import gtsam
|
||||
from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2,
|
||||
Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values,
|
||||
imuBias_ConstantBias)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestValues(unittest.TestCase):
|
||||
class TestValues(GtsamTestCase):
|
||||
|
||||
def test_values(self):
|
||||
values = Values()
|
||||
E = EssentialMatrix(Rot3(), Unit3())
|
||||
tol = 1e-9
|
||||
|
||||
values.insert(0, Point2(0,0))
|
||||
values.insert(1, Point3(0,0,0))
|
||||
values.insert(0, Point2(0, 0))
|
||||
values.insert(1, Point3(0, 0, 0))
|
||||
values.insert(2, Rot2())
|
||||
values.insert(3, Pose2())
|
||||
values.insert(4, Rot3())
|
||||
|
@ -34,36 +49,38 @@ class TestValues(unittest.TestCase):
|
|||
# The wrapper will automatically fix the type and storage order for you,
|
||||
# but for performance reasons, it's recommended to specify the correct
|
||||
# type and storage order.
|
||||
vec = np.array([1., 2., 3.]) # for vectors, the order is not important, but dtype still is
|
||||
# for vectors, the order is not important, but dtype still is
|
||||
vec = np.array([1., 2., 3.])
|
||||
values.insert(11, vec)
|
||||
mat = np.array([[1., 2.], [3., 4.]], order='F')
|
||||
values.insert(12, mat)
|
||||
# Test with dtype int and the default order='C'
|
||||
# This still works as the wrapper converts to the correct type and order for you
|
||||
# but is normally not recommended!
|
||||
mat2 = np.array([[1,2,],[3,5]])
|
||||
mat2 = np.array([[1, 2, ], [3, 5]])
|
||||
values.insert(13, mat2)
|
||||
|
||||
self.assertTrue(values.atPoint2(0).equals(Point2(), tol))
|
||||
self.assertTrue(values.atPoint3(1).equals(Point3(), tol))
|
||||
self.assertTrue(values.atRot2(2).equals(Rot2(), tol))
|
||||
self.assertTrue(values.atPose2(3).equals(Pose2(), tol))
|
||||
self.assertTrue(values.atRot3(4).equals(Rot3(), tol))
|
||||
self.assertTrue(values.atPose3(5).equals(Pose3(), tol))
|
||||
self.assertTrue(values.atCal3_S2(6).equals(Cal3_S2(), tol))
|
||||
self.assertTrue(values.atCal3DS2(7).equals(Cal3DS2(), tol))
|
||||
self.assertTrue(values.atCal3Bundler(8).equals(Cal3Bundler(), tol))
|
||||
self.assertTrue(values.atEssentialMatrix(9).equals(E, tol))
|
||||
self.assertTrue(values.atimuBias_ConstantBias(
|
||||
10).equals(imuBias_ConstantBias(), tol))
|
||||
self.gtsamAssertEquals(values.atPoint2(0), Point2(0,0), tol)
|
||||
self.gtsamAssertEquals(values.atPoint3(1), Point3(0,0,0), tol)
|
||||
self.gtsamAssertEquals(values.atRot2(2), Rot2(), tol)
|
||||
self.gtsamAssertEquals(values.atPose2(3), Pose2(), tol)
|
||||
self.gtsamAssertEquals(values.atRot3(4), Rot3(), tol)
|
||||
self.gtsamAssertEquals(values.atPose3(5), Pose3(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3_S2(6), Cal3_S2(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol)
|
||||
self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol)
|
||||
self.gtsamAssertEquals(values.atimuBias_ConstantBias(
|
||||
10), imuBias_ConstantBias(), tol)
|
||||
|
||||
# special cases for Vector and Matrix:
|
||||
actualVector = values.atVector(11)
|
||||
self.assertTrue(np.allclose(vec, actualVector, tol))
|
||||
np.testing.assert_allclose(vec, actualVector, tol)
|
||||
actualMatrix = values.atMatrix(12)
|
||||
self.assertTrue(np.allclose(mat, actualMatrix, tol))
|
||||
np.testing.assert_allclose(mat, actualMatrix, tol)
|
||||
actualMatrix2 = values.atMatrix(13)
|
||||
self.assertTrue(np.allclose(mat2, actualMatrix2, tol))
|
||||
np.testing.assert_allclose(mat2, actualMatrix2, tol)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
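The comments in the Values test above note that the wrapper fixes dtype and storage order automatically but recommends specifying them. A minimal sketch of those conventions, assuming a standalone gtsam.Values; the keys here are illustrative only:

import numpy as np

import gtsam

values = gtsam.Values()
# Vectors: use a float dtype; memory order is irrelevant for 1-D arrays.
values.insert(0, np.array([1., 2., 3.]))
# Matrices: float dtype plus column-major (Fortran) order avoids an extra
# conversion inside the wrapper.
values.insert(1, np.array([[1., 2.], [3., 4.]], order='F'))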
@ -1,10 +1,25 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
visual_isam unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
from gtsam import symbol
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.visual_data_generator as generator
|
||||
import gtsam.utils.visual_isam as visual_isam
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
class TestVisualISAMExample(unittest.TestCase):
|
||||
|
||||
class TestVisualISAMExample(GtsamTestCase):
|
||||
|
||||
def test_VisualISAMExample(self):
|
||||
# Data Options
|
||||
|
@ -32,11 +47,11 @@ class TestVisualISAMExample(unittest.TestCase):
|
|||
|
||||
for i in range(len(truth.cameras)):
|
||||
pose_i = result.atPose3(symbol(ord('x'), i))
|
||||
self.assertTrue(pose_i.equals(truth.cameras[i].pose(), 1e-5))
|
||||
self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
|
||||
|
||||
for j in range(len(truth.points)):
|
||||
point_j = result.atPoint3(symbol(ord('l'), j))
|
||||
self.assertTrue(point_j.equals(truth.points[j], 1e-5))
|
||||
self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for testing dataset access.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module, no-member
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam import BetweenFactorPose3, BetweenFactorPose3s
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestDataset(GtsamTestCase):
|
||||
"""Tests for datasets.h wrapper."""
|
||||
|
||||
def setUp(self):
|
||||
"""Get some common paths."""
|
||||
self.pose3_example_g2o_file = gtsam.findExampleDataFile(
|
||||
"pose3example.txt")
|
||||
|
||||
def test_readG2o3D(self):
|
||||
"""Test reading directly into factor graph."""
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(self.pose3_example_g2o_file, is3D)
|
||||
self.assertEqual(graph.size(), 6)
|
||||
self.assertEqual(initial.size(), 5)
|
||||
|
||||
def test_parse3Dfactors(self):
|
||||
"""Test parsing into data structure."""
|
||||
factors = gtsam.parse3DFactors(self.pose3_example_g2o_file)
|
||||
self.assertEqual(factors.size(), 6)
|
||||
self.assertIsInstance(factors.at(0), BetweenFactorPose3)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for Disjoint Set Forest.
|
||||
Author: Frank Dellaert & Varun Agrawal
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module, no-member
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestDSFMap(GtsamTestCase):
|
||||
"""Tests for DSFMap."""
|
||||
|
||||
def test_all(self):
|
||||
"""Test everything in DFSMap."""
|
||||
def key(index_pair):
|
||||
return index_pair.i(), index_pair.j()
|
||||
|
||||
dsf = gtsam.DSFMapIndexPair()
|
||||
pair1 = gtsam.IndexPair(1, 18)
|
||||
self.assertEqual(key(dsf.find(pair1)), key(pair1))
|
||||
pair2 = gtsam.IndexPair(2, 2)
|
||||
dsf.merge(pair1, pair2)
|
||||
self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -0,0 +1,89 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for 3D SLAM initialization, using rotation relaxation.
|
||||
Author: Luca Carlone and Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101, E0611
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import NonlinearFactorGraph, Point3, Pose3, Rot3, Values
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
x0, x1, x2, x3 = 0, 1, 2, 3
|
||||
|
||||
|
||||
class TestValues(GtsamTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
|
||||
|
||||
# We consider a small graph:
|
||||
# symbolic FG
|
||||
# x2 0 1
|
||||
# / | \ 1 2
|
||||
# / | \ 2 3
|
||||
# x3 | x1 2 0
|
||||
# \ | / 0 3
|
||||
# \ | /
|
||||
# x0
|
||||
#
|
||||
p0 = Point3(0, 0, 0)
|
||||
self.R0 = Rot3.Expmap(np.array([0.0, 0.0, 0.0]))
|
||||
p1 = Point3(1, 2, 0)
|
||||
self.R1 = Rot3.Expmap(np.array([0.0, 0.0, 1.570796]))
|
||||
p2 = Point3(0, 2, 0)
|
||||
self.R2 = Rot3.Expmap(np.array([0.0, 0.0, 3.141593]))
|
||||
p3 = Point3(-1, 1, 0)
|
||||
self.R3 = Rot3.Expmap(np.array([0.0, 0.0, 4.712389]))
|
||||
|
||||
pose0 = Pose3(self.R0, p0)
|
||||
pose1 = Pose3(self.R1, p1)
|
||||
pose2 = Pose3(self.R2, p2)
|
||||
pose3 = Pose3(self.R3, p3)
|
||||
|
||||
g = NonlinearFactorGraph()
|
||||
g.add(gtsam.BetweenFactorPose3(x0, x1, pose0.between(pose1), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x1, x2, pose1.between(pose2), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x2, x3, pose2.between(pose3), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x2, x0, pose2.between(pose0), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x0, x3, pose0.between(pose3), model))
|
||||
g.add(gtsam.PriorFactorPose3(x0, pose0, model))
|
||||
self.graph = g
|
||||
|
||||
def test_buildPose3graph(self):
|
||||
pose3graph = gtsam.InitializePose3.buildPose3graph(self.graph)
|
||||
|
||||
def test_orientations(self):
|
||||
pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph)
|
||||
|
||||
initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph)
|
||||
|
||||
# comparison is up to M_PI, that's why we add some multiples of 2*M_PI
|
||||
self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x2), self.R2, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x3), self.R3, 1e-6)
|
||||
|
||||
def test_initializePoses(self):
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example-grid")
|
||||
is3D = True
|
||||
inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D)
|
||||
priorModel = gtsam.noiseModel_Unit.Create(6)
|
||||
inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel))
|
||||
|
||||
initial = gtsam.InitializePose3.initialize(inputGraph)
|
||||
# TODO(frank): very loose !!
|
||||
self.gtsamAssertEquals(initial, expectedValues, 0.1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
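As I understand the chordal initialization exercised by test_orientations above (the relaxation itself is not spelled out in this diff), computeOrientationsChordal solves the linear least-squares problem

    \min_{R_1,\dots,R_n \in \mathbb{R}^{3\times 3}} \sum_{(i,j)} \lVert R_j - R_i\,\tilde{R}_{ij} \rVert_F^2

over unconstrained 3x3 matrices, where $\tilde{R}_{ij}$ are the relative rotations from the between factors, and then projects each estimate back onto SO(3).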
@ -2,9 +2,10 @@
|
|||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib import patches
|
||||
|
||||
|
||||
def plot_pose2_on_axes(axes, pose, axis_length=0.1):
|
||||
def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
|
||||
"""Plot a 2D pose on given axis 'axes' with given 'axis_length'."""
|
||||
# get rotation and translation (center)
|
||||
gRp = pose.rotation().matrix() # rotation from pose to global
|
||||
|
@ -20,13 +21,26 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1):
|
|||
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], 'g-')
|
||||
|
||||
if covariance is not None:
|
||||
pPp = covariance[0:2, 0:2]
|
||||
gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
|
||||
|
||||
def plot_pose2(fignum, pose, axis_length=0.1):
|
||||
w, v = np.linalg.eig(gPp)
|
||||
|
||||
# k = 2.296
|
||||
k = 5.0
|
||||
|
||||
angle = np.arctan2(v[1, 0], v[0, 0])
|
||||
e1 = patches.Ellipse(origin, np.sqrt(w[0]*k), np.sqrt(w[1]*k),
|
||||
np.rad2deg(angle), fill=False)
|
||||
axes.add_patch(e1)
|
||||
|
||||
def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
|
||||
"""Plot a 2D pose on given figure with given 'axis_length'."""
|
||||
# get figure object
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca()
|
||||
plot_pose2_on_axes(axes, pose, axis_length)
|
||||
plot_pose2_on_axes(axes, pose, axis_length, covariance)
|
||||
|
||||
|
||||
def plot_point3_on_axes(axes, point, linespec):
|
||||
|
|
|
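The ellipse scale k hard-coded above (with k = 2.296 left as a comment) controls the size of the confidence region drawn by plot_pose2_on_axes. A hedged sketch of where such constants come from, assuming SciPy is available; the exact choice of k is the plotting code's, not GTSAM's:

from scipy.stats import chi2

# The squared Mahalanobis distance of a 2-D Gaussian follows a chi-square
# distribution with 2 degrees of freedom, so the ellipse scale for a given
# confidence level is its quantile.
k_1sigma = chi2.ppf(0.683, df=2)  # ~2.30, close to the commented-out 2.296
k_95 = chi2.ppf(0.95, df=2)       # ~5.99; the hard-coded k = 5.0 covers ~92%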
@ -0,0 +1,27 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
TestCase class with GTSAM assert utils.
|
||||
Author: Frank Dellaert
|
||||
"""
|
||||
import unittest
|
||||
|
||||
|
||||
class GtsamTestCase(unittest.TestCase):
|
||||
"""TestCase class with GTSAM assert utils."""
|
||||
|
||||
def gtsamAssertEquals(self, actual, expected, tol=1e-9):
|
||||
""" AssertEqual function that prints out actual and expected if not equal.
|
||||
Usage:
|
||||
self.gtsamAssertEquals(actual, expected)
|
||||
Keyword Arguments:
|
||||
tol {float} -- tolerance passed to 'equals', default 1e-9
|
||||
"""
|
||||
equal = actual.equals(expected, tol)
|
||||
if not equal:
|
||||
raise self.failureException(
|
||||
"Values are not equal:\n{}!={}".format(actual, expected))
|
|
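A minimal usage sketch of the GtsamTestCase helper defined above, mirroring the conversions made throughout this diff; the test class and tolerance are illustrative only:

import gtsam
from gtsam.utils.test_case import GtsamTestCase


class TestExample(GtsamTestCase):

    def test_identity(self):
        # gtsamAssertEquals delegates to the wrapped type's equals(other, tol)
        # and prints both values when the assertion fails.
        self.gtsamAssertEquals(gtsam.Pose2(), gtsam.Pose2(), 1e-9)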
@ -22,6 +22,17 @@ cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conv
|
|||
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
||||
${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||
|
||||
# Include Eigen headers:
|
||||
target_include_directories(cythonize_eigency_conversions PUBLIC
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
target_include_directories(cythonize_eigency_core PUBLIC
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
|
||||
add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
|
||||
add_custom_target(cythonize_eigency)
|
||||
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import os
|
||||
import numpy as np
|
||||
|
||||
__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_PREFIX}"
|
||||
__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}"
|
||||
|
||||
def get_includes(include_eigen=True):
|
||||
root = os.path.dirname(__file__)
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
from .gtsam_unstable import *
|
|
@ -0,0 +1,102 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Demonstration of the fixed-lag smoothers using a planar robot example
|
||||
and multiple odometry-like sensors
|
||||
Author: Frank Dellaert (C++), Jeremy Aguilon (Python)
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam_unstable
|
||||
|
||||
|
||||
def _timestamp_key_value(key, value):
|
||||
"""
|
||||
|
||||
"""
|
||||
return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue(
|
||||
key, value
|
||||
)
|
||||
|
||||
|
||||
def BatchFixedLagSmootherExample():
|
||||
"""
|
||||
Runs a batch fixed smoother on an agent with two odometry
|
||||
sensors that is simply moving forward at a constant speed.
|
||||
"""
|
||||
|
||||
# Define a batch fixed lag smoother, which uses
|
||||
# Levenberg-Marquardt to perform the nonlinear optimization
|
||||
lag = 2.0
|
||||
smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)
|
||||
|
||||
# Create containers to store the factors and linearization points
|
||||
# that will be sent to the smoothers
|
||||
new_factors = gtsam.NonlinearFactorGraph()
|
||||
new_values = gtsam.Values()
|
||||
new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()
|
||||
|
||||
# Create a prior on the first pose, placing it at the origin
|
||||
prior_mean = gtsam.Pose2(0, 0, 0)
|
||||
prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
X1 = 0
|
||||
new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
|
||||
new_values.insert(X1, prior_mean)
|
||||
new_timestamps.insert(_timestamp_key_value(X1, 0.0))
|
||||
|
||||
delta_time = 0.25
|
||||
time = 0.25
|
||||
|
||||
while time <= 3.0:
|
||||
previous_key = 1000 * (time - delta_time)
|
||||
current_key = 1000 * time
|
||||
|
||||
# assign current key to the current timestamp
|
||||
new_timestamps.insert(_timestamp_key_value(current_key, time))
|
||||
|
||||
# Add a guess for this pose to the new values
|
||||
# Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s]
|
||||
current_pose = gtsam.Pose2(time * 2, 0, 0)
|
||||
new_values.insert(current_key, current_pose)
|
||||
|
||||
# Add odometry factors from two different sources with different error
|
||||
# stats
|
||||
odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
|
||||
odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.1, 0.1, 0.05]))
|
||||
new_factors.push_back(gtsam.BetweenFactorPose2(
|
||||
previous_key, current_key, odometry_measurement_1, odometry_noise_1
|
||||
))
|
||||
|
||||
odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
|
||||
odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.05, 0.05, 0.05]))
|
||||
new_factors.push_back(gtsam.BetweenFactorPose2(
|
||||
previous_key, current_key, odometry_measurement_2, odometry_noise_2
|
||||
))
|
||||
|
||||
# Update the smoothers with the new factors. In this case,
|
||||
# one iteration must pass for Levenberg-Marquardt to accurately
|
||||
# estimate
|
||||
if time >= 0.50:
|
||||
smoother_batch.update(new_factors, new_values, new_timestamps)
|
||||
print("Timestamp = " + str(time) + ", Key = " + str(current_key))
|
||||
print(smoother_batch.calculateEstimatePose2(current_key))
|
||||
|
||||
new_timestamps.clear()
|
||||
new_values.clear()
|
||||
new_factors.resize(0)
|
||||
|
||||
time += delta_time
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
BatchFixedLagSmootherExample()
|
||||
print("Example complete")
|
|
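One step worth spelling out: the example above maps continuous timestamps to integer-valued keys by multiplying by 1000, so consecutive poses 0.25 s apart get distinct keys. A sketch with a hypothetical helper (pose_key is mine, not part of the example; the factor of 1000 is the example's choice, not a GTSAM requirement):

def pose_key(t, resolution=1000):
    # 0.25 s steps map to keys 250, 500, 750, ...
    return int(round(t * resolution))

assert pose_key(0.25) == 250
assert pose_key(3.0) == 3000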
@ -0,0 +1,135 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
FixedLagSmoother unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam_unstable
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
def _timestamp_key_value(key, value):
|
||||
return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue(
|
||||
key, value
|
||||
)
|
||||
|
||||
|
||||
class TestFixedLagSmootherExample(GtsamTestCase):
|
||||
'''
|
||||
Tests the fixed lag smoother wrapper
|
||||
'''
|
||||
|
||||
def test_FixedLagSmootherExample(self):
|
||||
'''
|
||||
Simple test that checks for equality between C++ example
|
||||
file and the Python implementation. See
|
||||
gtsam_unstable/examples/FixedLagSmootherExample.cpp
|
||||
'''
|
||||
# Define a batch fixed lag smoother, which uses
|
||||
# Levenberg-Marquardt to perform the nonlinear optimization
|
||||
lag = 2.0
|
||||
smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)
|
||||
|
||||
# Create containers to store the factors and linearization points
|
||||
# that will be sent to the smoothers
|
||||
new_factors = gtsam.NonlinearFactorGraph()
|
||||
new_values = gtsam.Values()
|
||||
new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()
|
||||
|
||||
# Create a prior on the first pose, placing it at the origin
|
||||
prior_mean = gtsam.Pose2(0, 0, 0)
|
||||
prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.3, 0.3, 0.1]))
|
||||
X1 = 0
|
||||
new_factors.push_back(
|
||||
gtsam.PriorFactorPose2(
|
||||
X1, prior_mean, prior_noise))
|
||||
new_values.insert(X1, prior_mean)
|
||||
new_timestamps.insert(_timestamp_key_value(X1, 0.0))
|
||||
|
||||
delta_time = 0.25
|
||||
time = 0.25
|
||||
|
||||
i = 0
|
||||
|
||||
ground_truth = [
|
||||
gtsam.Pose2(0.995821, 0.0231012, 0.0300001),
|
||||
gtsam.Pose2(1.49284, 0.0457247, 0.045),
|
||||
gtsam.Pose2(1.98981, 0.0758879, 0.06),
|
||||
gtsam.Pose2(2.48627, 0.113502, 0.075),
|
||||
gtsam.Pose2(2.98211, 0.158558, 0.09),
|
||||
gtsam.Pose2(3.47722, 0.211047, 0.105),
|
||||
gtsam.Pose2(3.97149, 0.270956, 0.12),
|
||||
gtsam.Pose2(4.4648, 0.338272, 0.135),
|
||||
gtsam.Pose2(4.95705, 0.41298, 0.15),
|
||||
gtsam.Pose2(5.44812, 0.495063, 0.165),
|
||||
gtsam.Pose2(5.9379, 0.584503, 0.18),
|
||||
]
|
||||
|
||||
# Iterates from 0.25s to 3.0s, adding 0.25s each loop
|
||||
# In each iteration, the agent moves at a constant speed
|
||||
# and its two odometers measure the change. The smoothed
|
||||
# result is then compared to the ground truth
|
||||
while time <= 3.0:
|
||||
previous_key = 1000 * (time - delta_time)
|
||||
current_key = 1000 * time
|
||||
|
||||
# assign current key to the current timestamp
|
||||
new_timestamps.insert(_timestamp_key_value(current_key, time))
|
||||
|
||||
# Add a guess for this pose to the new values
|
||||
# Assume that the robot moves at 2 m/s. Position is time[s] *
|
||||
# 2[m/s]
|
||||
current_pose = gtsam.Pose2(time * 2, 0, 0)
|
||||
new_values.insert(current_key, current_pose)
|
||||
|
||||
# Add odometry factors from two different sources with different
|
||||
# error stats
|
||||
odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
|
||||
odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.1, 0.1, 0.05]))
|
||||
new_factors.push_back(
|
||||
gtsam.BetweenFactorPose2(
|
||||
previous_key,
|
||||
current_key,
|
||||
odometry_measurement_1,
|
||||
odometry_noise_1))
|
||||
|
||||
odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
|
||||
odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.05, 0.05, 0.05]))
|
||||
new_factors.push_back(
|
||||
gtsam.BetweenFactorPose2(
|
||||
previous_key,
|
||||
current_key,
|
||||
odometry_measurement_2,
|
||||
odometry_noise_2))
|
||||
|
||||
# Update the smoothers with the new factors. In this case,
|
||||
# one iteration must pass for Levenberg-Marquardt to accurately
|
||||
# estimate
|
||||
if time >= 0.50:
|
||||
smoother_batch.update(new_factors, new_values, new_timestamps)
|
||||
|
||||
estimate = smoother_batch.calculateEstimatePose2(current_key)
|
||||
self.assertTrue(estimate.equals(ground_truth[i], 1e-4))
|
||||
i += 1
|
||||
|
||||
new_timestamps.clear()
|
||||
new_values.clear()
|
||||
new_factors.resize(0)
|
||||
|
||||
time += delta_time
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -0,0 +1,49 @@
|
|||
import os
|
||||
import sys
|
||||
try:
|
||||
from setuptools import setup, find_packages
|
||||
except ImportError:
|
||||
from distutils.core import setup, find_packages
|
||||
|
||||
if 'SETUP_PY_NO_CHECK' not in os.environ:
|
||||
script_path = os.path.abspath(os.path.realpath(__file__))
|
||||
install_path = os.path.abspath(os.path.realpath(os.path.join('${GTSAM_CYTHON_INSTALL_PATH}', 'setup.py')))
|
||||
if script_path != install_path:
|
||||
print('setup.py is being run from an unexpected location: "{}"'.format(script_path))
|
||||
print('please run `make install` and run the script from there')
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
packages = find_packages()
|
||||
|
||||
setup(
|
||||
name='gtsam',
|
||||
description='Georgia Tech Smoothing And Mapping library',
|
||||
url='https://bitbucket.org/gtborg/gtsam',
|
||||
version='${GTSAM_VERSION_STRING}', # https://www.python.org/dev/peps/pep-0440/
|
||||
license='Simplified BSD license',
|
||||
keywords='slam sam robotics localization mapping optimization',
|
||||
long_description='''${README_CONTENTS}''',
|
||||
# https://pypi.org/pypi?%3Aaction=list_classifiers
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Intended Audience :: Education',
|
||||
'Intended Audience :: Developers',
|
||||
'Intended Audience :: Science/Research',
|
||||
'Operating System :: MacOS',
|
||||
'Operating System :: Microsoft :: Windows',
|
||||
'Operating System :: POSIX',
|
||||
'License :: OSI Approved :: BSD License',
|
||||
'Programming Language :: Python :: 2',
|
||||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
|
||||
packages=packages,
|
||||
package_data={package:
|
||||
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.dll')]
|
||||
for package in packages
|
||||
},
|
||||
install_requires=[line.strip() for line in '''
|
||||
${CYTHON_INSTALL_REQUIREMENTS}
|
||||
'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
|
||||
)
|
|
@ -0,0 +1,20 @@
|
|||
NAME HS21
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
G R------1
|
||||
COLUMNS
|
||||
C------1 R------1 0.100000e+02
|
||||
C------2 R------1 -.100000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC 0.100000e+03
|
||||
RHS R------1 0.100000e+02
|
||||
RANGES
|
||||
BOUNDS
|
||||
LO BOUNDS C------1 0.200000e+01
|
||||
UP BOUNDS C------1 0.500000e+02
|
||||
LO BOUNDS C------2 -.500000e+02
|
||||
UP BOUNDS C------2 0.500000e+02
|
||||
QUADOBJ
|
||||
C------1 C------1 0.200000e-01
|
||||
C------2 C------2 0.200000e+01
|
||||
ENDATA
|
|
@ -0,0 +1,55 @@
|
|||
NAME HS268
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
G R------1
|
||||
G R------2
|
||||
G R------3
|
||||
G R------4
|
||||
G R------5
|
||||
COLUMNS
|
||||
C------1 OBJ.FUNC 0.183400e+05 R------1 -.100000e+01
|
||||
C------1 R------2 0.100000e+02 R------3 -.800000e+01
|
||||
C------1 R------4 0.800000e+01 R------5 -.400000e+01
|
||||
C------2 OBJ.FUNC -.341980e+05 R------1 -.100000e+01
|
||||
C------2 R------2 0.100000e+02 R------3 0.100000e+01
|
||||
C------2 R------4 -.100000e+01 R------5 -.200000e+01
|
||||
C------3 OBJ.FUNC 0.454200e+04 R------1 -.100000e+01
|
||||
C------3 R------2 -.300000e+01 R------3 -.200000e+01
|
||||
C------3 R------4 0.200000e+01 R------5 0.300000e+01
|
||||
C------4 OBJ.FUNC 0.867200e+04 R------1 -.100000e+01
|
||||
C------4 R------2 0.500000e+01 R------3 -.500000e+01
|
||||
C------4 R------4 0.500000e+01 R------5 -.500000e+01
|
||||
C------5 OBJ.FUNC 0.860000e+02 R------1 -.100000e+01
|
||||
C------5 R------2 0.400000e+01 R------3 0.300000e+01
|
||||
C------5 R------4 -.300000e+01 R------5 0.100000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC -.144630e+05
|
||||
RHS R------1 -.500000e+01
|
||||
RHS R------2 0.200000e+02
|
||||
RHS R------3 -.400000e+02
|
||||
RHS R------4 0.110000e+02
|
||||
RHS R------5 -.300000e+02
|
||||
RANGES
|
||||
BOUNDS
|
||||
FR BOUNDS C------1
|
||||
FR BOUNDS C------2
|
||||
FR BOUNDS C------3
|
||||
FR BOUNDS C------4
|
||||
FR BOUNDS C------5
|
||||
QUADOBJ
|
||||
C------1 C------1 0.203940e+05
|
||||
C------1 C------2 -.249080e+05
|
||||
C------1 C------3 -.202600e+04
|
||||
C------1 C------4 0.389600e+04
|
||||
C------1 C------5 0.658000e+03
|
||||
C------2 C------2 0.418180e+05
|
||||
C------2 C------3 -.346600e+04
|
||||
C------2 C------4 -.982800e+04
|
||||
C------2 C------5 -.372000e+03
|
||||
C------3 C------3 0.351000e+04
|
||||
C------3 C------4 0.217800e+04
|
||||
C------3 C------5 -.348000e+03
|
||||
C------4 C------4 0.303000e+04
|
||||
C------4 C------5 -.440000e+02
|
||||
C------5 C------5 0.540000e+02
|
||||
ENDATA
|
|
@ -0,0 +1,20 @@
|
|||
NAME HS35
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
G R------1
|
||||
COLUMNS
|
||||
C------1 OBJ.FUNC -.800000e+01 R------1 -.100000e+01
|
||||
C------2 OBJ.FUNC -.600000e+01 R------1 -.100000e+01
|
||||
C------3 OBJ.FUNC -.400000e+01 R------1 -.200000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC -.900000e+01
|
||||
RHS R------1 -.300000e+01
|
||||
RANGES
|
||||
BOUNDS
|
||||
QUADOBJ
|
||||
C------1 C------1 0.400000e+01
|
||||
C------1 C------2 0.200000e+01
|
||||
C------1 C------3 0.200000e+01
|
||||
C------2 C------2 0.400000e+01
|
||||
C------3 C------3 0.200000e+01
|
||||
ENDATA
|
|
@ -0,0 +1,21 @@
|
|||
NAME HS35MOD
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
G R------1
|
||||
COLUMNS
|
||||
C------1 OBJ.FUNC -.800000e+01 R------1 -.100000e+01
|
||||
C------2 OBJ.FUNC -.600000e+01 R------1 -.100000e+01
|
||||
C------3 OBJ.FUNC -.400000e+01 R------1 -.200000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC -.900000e+01
|
||||
RHS R------1 -.300000e+01
|
||||
RANGES
|
||||
BOUNDS
|
||||
FX BOUNDS C------2 0.500000e+00
|
||||
QUADOBJ
|
||||
C------1 C------1 0.400000e+01
|
||||
C------1 C------2 0.200000e+01
|
||||
C------1 C------3 0.200000e+01
|
||||
C------2 C------2 0.400000e+01
|
||||
C------3 C------3 0.200000e+01
|
||||
ENDATA
|
|
@ -0,0 +1,33 @@
|
|||
NAME HS51
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
E R------1
|
||||
E R------2
|
||||
E R------3
|
||||
COLUMNS
|
||||
C------1 R------1 0.100000e+01
|
||||
C------2 OBJ.FUNC -.400000e+01 R------1 0.300000e+01
|
||||
C------2 R------3 0.100000e+01
|
||||
C------3 OBJ.FUNC -.400000e+01 R------2 0.100000e+01
|
||||
C------4 OBJ.FUNC -.200000e+01 R------2 0.100000e+01
|
||||
C------5 OBJ.FUNC -.200000e+01 R------2 -.200000e+01
|
||||
C------5 R------3 -.100000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC -.600000e+01
|
||||
RHS R------1 0.400000e+01
|
||||
RANGES
|
||||
BOUNDS
|
||||
FR BOUNDS C------1
|
||||
FR BOUNDS C------2
|
||||
FR BOUNDS C------3
|
||||
FR BOUNDS C------4
|
||||
FR BOUNDS C------5
|
||||
QUADOBJ
|
||||
C------1 C------1 0.200000e+01
|
||||
C------1 C------2 -.200000e+01
|
||||
C------2 C------2 0.400000e+01
|
||||
C------2 C------3 0.200000e+01
|
||||
C------3 C------3 0.200000e+01
|
||||
C------4 C------4 0.200000e+01
|
||||
C------5 C------5 0.200000e+01
|
||||
ENDATA
|
|
@ -0,0 +1,32 @@
|
|||
NAME HS52
|
||||
ROWS
|
||||
N OBJ.FUNC
|
||||
E R------1
|
||||
E R------2
|
||||
E R------3
|
||||
COLUMNS
|
||||
C------1 R------1 0.100000e+01
|
||||
C------2 OBJ.FUNC -.400000e+01 R------1 0.300000e+01
|
||||
C------2 R------3 0.100000e+01
|
||||
C------3 OBJ.FUNC -.400000e+01 R------2 0.100000e+01
|
||||
C------4 OBJ.FUNC -.200000e+01 R------2 0.100000e+01
|
||||
C------5 OBJ.FUNC -.200000e+01 R------2 -.200000e+01
|
||||
C------5 R------3 -.100000e+01
|
||||
RHS
|
||||
RHS OBJ.FUNC -.600000e+01
|
||||
RANGES
|
||||
BOUNDS
|
||||
FR BOUNDS C------1
|
||||
FR BOUNDS C------2
|
||||
FR BOUNDS C------3
|
||||
FR BOUNDS C------4
|
||||
FR BOUNDS C------5
|
||||
QUADOBJ
|
||||
C------1 C------1 0.320000e+02
|
||||
C------1 C------2 -.800000e+01
|
||||
C------2 C------2 0.400000e+01
|
||||
C------2 C------3 0.200000e+01
|
||||
C------3 C------3 0.200000e+01
|
||||
C------4 C------4 0.200000e+01
|
||||
C------5 C------5 0.200000e+01
|
||||
ENDATA
|
|
@ -0,0 +1,19 @@
|
|||
NAME QP example
|
||||
ROWS
|
||||
N obj
|
||||
G r1
|
||||
L r2
|
||||
COLUMNS
|
||||
c1 r1 2.0 r2 -1.0
|
||||
c1 obj 1.5
|
||||
c2 r1 1.0 r2 2.0
|
||||
c2 obj -2.0
|
||||
RHS
|
||||
rhs1 r1 2.0 r2 6.0
|
||||
BOUNDS
|
||||
UP bnd1 c1 20.0
|
||||
QUADOBJ
|
||||
c1 c1 8.0
|
||||
c1 c2 2.0
|
||||
c2 c2 10.0
|
||||
ENDATA
|
File diff suppressed because it is too large
|
@ -0,0 +1,169 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
|
||||
<!DOCTYPE boost_serialization>
|
||||
<boost_serialization signature="serialization::archive" version="17">
|
||||
<graph class_id="0" tracking_level="0" version="0">
|
||||
<Base class_id="1" tracking_level="0" version="0">
|
||||
<factors_ class_id="2" tracking_level="0" version="0">
|
||||
<count>2</count>
|
||||
<item_version>1</item_version>
|
||||
<item class_id="3" tracking_level="0" version="1">
|
||||
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
|
||||
<Base class_id="5" tracking_level="0" version="0">
|
||||
<Base class_id="6" tracking_level="0" version="0">
|
||||
<keys_>
|
||||
<count>2</count>
|
||||
<item_version>0</item_version>
|
||||
<item>0</item>
|
||||
<item>1</item>
|
||||
</keys_>
|
||||
</Base>
|
||||
</Base>
|
||||
<Ab_ class_id="8" tracking_level="0" version="0">
|
||||
<matrix_ class_id="9" tracking_level="0" version="0">
|
||||
<rows>9</rows>
|
||||
<cols>7</cols>
|
||||
<data>
|
||||
<item>1.22427709071730959e+01</item>
|
||||
<item>1.51514613104920048e+01</item>
|
||||
<item>3.60934366857813060e+00</item>
|
||||
<item>-1.18407259026383116e+01</item>
|
||||
<item>7.84826117220921216e+00</item>
|
||||
<item>1.23509165494819051e+01</item>
|
||||
<item>-6.09875015991639735e+00</item>
|
||||
<item>6.16547190708139126e-01</item>
|
||||
<item>3.94972084922329048e+00</item>
|
||||
<item>-4.89208482920378174e+00</item>
|
||||
<item>3.02091647632478866e+00</item>
|
||||
<item>-8.95328692238917512e+00</item>
|
||||
<item>7.89831607220345955e+00</item>
|
||||
<item>-2.36793602009719084e+00</item>
|
||||
<item>1.48517612051941725e+01</item>
|
||||
<item>-3.97284286249233731e-01</item>
|
||||
<item>-1.95744531643153863e+01</item>
|
||||
<item>-3.85954855417462017e+00</item>
|
||||
<item>4.79268277145419042e+00</item>
|
||||
<item>-9.01707953629520453e+00</item>
|
||||
<item>1.37848069005841385e+01</item>
|
||||
<item>1.04829326688375950e+01</item>
|
||||
<item>-5.00630568442241675e+00</item>
|
||||
<item>4.70463561852773182e+00</item>
|
||||
<item>-1.59179134598689274e+01</item>
|
||||
<item>-2.04767784956723942e+00</item>
|
||||
<item>9.54135497908261954e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>-0.00000000000000000e+00</item>
|
||||
<item>-1.76201757312015372e+00</item>
|
||||
<item>1.98634190821282672e+01</item>
|
||||
<item>1.52966546661624236e+00</item>
|
||||
<item>1.94817649567575373e+01</item>
|
||||
<item>1.39684693294110307e+00</item>
|
||||
<item>4.30228460420588288e+00</item>
|
||||
<item>1.76201757312015372e+00</item>
|
||||
<item>-1.98634190821282672e+01</item>
|
||||
<item>-1.52966546661624236e+00</item>
|
||||
<item>-0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>4.16606867942304948e+00</item>
|
||||
<item>1.86906420801308037e+00</item>
|
||||
<item>-1.94717865319198360e+01</item>
|
||||
<item>-1.94817649567575373e+01</item>
|
||||
<item>-1.39684693294110307e+00</item>
|
||||
<item>-4.30228460420588288e+00</item>
|
||||
<item>-4.16606867942304948e+00</item>
|
||||
<item>-1.86906420801308037e+00</item>
|
||||
<item>1.94717865319198360e+01</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>1.00891462904330531e+01</item>
|
||||
<item>-1.08132497987816869e+01</item>
|
||||
<item>8.66487736568128497e+00</item>
|
||||
<item>2.88370015604634311e+01</item>
|
||||
<item>1.89391698948574643e+01</item>
|
||||
<item>2.12398960190661290e+00</item>
|
||||
<item>1.22150946112124039e+01</item>
|
||||
<item>-2.33658532501596561e+01</item>
|
||||
<item>1.51576204760307363e+01</item>
|
||||
</data>
|
||||
</matrix_>
|
||||
<variableColOffsets_>
|
||||
<count>4</count>
|
||||
<item_version>0</item_version>
|
||||
<item>0</item>
|
||||
<item>3</item>
|
||||
<item>6</item>
|
||||
<item>7</item>
|
||||
</variableColOffsets_>
|
||||
<rowStart_>0</rowStart_>
|
||||
<rowEnd_>9</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_ class_id="11" tracking_level="0" version="1">
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
</px>
|
||||
</item>
|
||||
<item>
|
||||
<px class_id_reference="4" object_id="_1">
|
||||
<Base>
|
||||
<Base>
|
||||
<keys_>
|
||||
<count>2</count>
|
||||
<item_version>0</item_version>
|
||||
<item>0</item>
|
||||
<item>1</item>
|
||||
</keys_>
|
||||
</Base>
|
||||
</Base>
|
||||
<Ab_>
|
||||
<matrix_>
|
||||
<rows>3</rows>
|
||||
<cols>7</cols>
|
||||
<data>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>1.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
<item>0.00000000000000000e+00</item>
|
||||
</data>
|
||||
</matrix_>
|
||||
<variableColOffsets_>
|
||||
<count>4</count>
|
||||
<item_version>0</item_version>
|
||||
<item>0</item>
|
||||
<item>3</item>
|
||||
<item>6</item>
|
||||
<item>7</item>
|
||||
</variableColOffsets_>
|
||||
<rowStart_>0</rowStart_>
|
||||
<rowEnd_>3</rowEnd_>
|
||||
<blockStart_>0</blockStart_>
|
||||
</Ab_>
|
||||
<model_>
|
||||
<px class_id="-1"></px>
|
||||
</model_>
|
||||
</px>
|
||||
</item>
|
||||
</factors_>
|
||||
</Base>
|
||||
</graph>
|
||||
</boost_serialization>
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file InverseKinematicsExampleExpressions.cpp
|
||||
* @brief Implement inverse kinematics on a three-link arm using expressions.
|
||||
* @date April 15, 2019
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/Marginals.h>
|
||||
#include <gtsam/nonlinear/expressions.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/slam/expressions.h>
|
||||
|
||||
#include <cmath>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// Scalar multiplication of a vector, with derivatives.
|
||||
inline Vector3 scalarMultiply(const double& s, const Vector3& v,
|
||||
OptionalJacobian<3, 1> Hs,
|
||||
OptionalJacobian<3, 3> Hv) {
|
||||
if (Hs) *Hs = v;
|
||||
if (Hv) *Hv = s * I_3x3;
|
||||
return s * v;
|
||||
}
|
||||
|
||||
// Expression version of scalar product, using above function.
|
||||
inline Vector3_ operator*(const Double_& s, const Vector3_& v) {
|
||||
return Vector3_(&scalarMultiply, s, v);
|
||||
}
|
||||
|
||||
// Expression version of Pose2::Expmap
|
||||
inline Pose2_ Expmap(const Vector3_& xi) { return Pose2_(&Pose2::Expmap, xi); }
|
||||
|
||||
// Main function
|
||||
int main(int argc, char** argv) {
|
||||
// Three-link planar manipulator specification.
|
||||
const double L1 = 3.5, L2 = 3.5, L3 = 2.5; // link lengths
|
||||
const Pose2 sXt0(0, L1 + L2 + L3, M_PI / 2); // end-effector pose at rest
|
||||
const Vector3 xi1(0, 0, 1), xi2(L1, 0, 1),
|
||||
xi3(L1 + L2, 0, 1); // screw axes at rest
|
||||
|
||||
// Create Expressions for unknowns
|
||||
using symbol_shorthand::Q;
|
||||
Double_ q1(Q(1)), q2(Q(2)), q3(Q(3));
|
||||
|
||||
// Forward kinematics expression as product of exponentials
|
||||
Pose2_ l1Zl1 = Expmap(q1 * Vector3_(xi1));
|
||||
Pose2_ l2Zl2 = Expmap(q2 * Vector3_(xi2));
|
||||
Pose2_ l3Zl3 = Expmap(q3 * Vector3_(xi3));
|
||||
Pose2_ forward = compose(compose(l1Zl1, l2Zl2), compose(l3Zl3, Pose2_(sXt0)));
|
||||
|
||||
// Create a factor graph with a single expression factor.
|
||||
ExpressionFactorGraph graph;
|
||||
Pose2 desiredEndEffectorPose(3, 2, 0);
|
||||
auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
|
||||
graph.addExpressionFactor(forward, desiredEndEffectorPose, model);
|
||||
|
||||
// Create initial estimate
|
||||
Values initial;
|
||||
initial.insert(Q(1), 0.1);
|
||||
initial.insert(Q(2), 0.2);
|
||||
initial.insert(Q(3), 0.3);
|
||||
initial.print("\nInitial Estimate:\n"); // print
|
||||
GTSAM_PRINT(forward.value(initial));
|
||||
|
||||
// Optimize the initial values using a Levenberg-Marquardt nonlinear optimizer
|
||||
LevenbergMarquardtParams params;
|
||||
params.setlambdaInitial(1e6);
|
||||
LevenbergMarquardtOptimizer optimizer(graph, initial, params);
|
||||
Values result = optimizer.optimize();
|
||||
result.print("Final Result:\n");
|
||||
|
||||
GTSAM_PRINT(forward.value(result));
|
||||
|
||||
return 0;
|
||||
}
|
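The forward-kinematics expression composed in the example above is the standard product of exponentials; written out (notation mine, matching the variable names in the code):

    {}^{s}T_{t}(q_1, q_2, q_3) \;=\; \exp(q_1\,\hat{\xi}_1)\,\exp(q_2\,\hat{\xi}_2)\,\exp(q_3\,\hat{\xi}_3)\;{}^{s}X_{t0},

where $\hat{\xi}_i$ are the screw axes at rest (xi1, xi2, xi3) and ${}^{s}X_{t0}$ is the rest pose sXt0.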
157
gtsam.h
|
@ -209,11 +209,60 @@ class KeyGroupMap {
|
|||
bool insert2(size_t key, int val);
|
||||
};
|
||||
|
||||
// Actually a FastSet<FactorIndex>
|
||||
class FactorIndexSet {
|
||||
FactorIndexSet();
|
||||
FactorIndexSet(const gtsam::FactorIndexSet& set);
|
||||
|
||||
// common STL methods
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
void clear();
|
||||
|
||||
// structure specific methods
|
||||
void insert(size_t factorIndex);
|
||||
bool erase(size_t factorIndex); // returns true if value was removed
|
||||
bool count(size_t factorIndex) const; // returns true if value exists
|
||||
};
|
||||
|
||||
// Actually a vector<FactorIndex>
|
||||
class FactorIndices {
|
||||
FactorIndices();
|
||||
FactorIndices(const gtsam::FactorIndices& other);
|
||||
|
||||
// common STL methods
|
||||
size_t size() const;
|
||||
bool empty() const;
|
||||
void clear();
|
||||
|
||||
// structure specific methods
|
||||
size_t at(size_t i) const;
|
||||
size_t front() const;
|
||||
size_t back() const;
|
||||
void push_back(size_t factorIndex) const;
|
||||
};
|
||||
|
||||
//*************************************************************************
|
||||
// base
|
||||
//*************************************************************************
|
||||
|
||||
/** gtsam namespace functions */
|
||||
|
||||
#include <gtsam/base/DSFMap.h>
|
||||
class IndexPair {
|
||||
IndexPair();
|
||||
IndexPair(size_t i, size_t j);
|
||||
size_t i() const;
|
||||
size_t j() const;
|
||||
};
|
||||
|
||||
template<KEY = {gtsam::IndexPair}>
|
||||
class DSFMap {
|
||||
DSFMap();
|
||||
KEY find(const KEY& key) const;
|
||||
void merge(const KEY& x, const KEY& y);
|
||||
};
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
bool linear_independent(Matrix A, Matrix B, double tol);
|
||||
|
||||
|
@ -577,9 +626,9 @@ class Pose2 {
|
|||
static Matrix LogmapDerivative(const gtsam::Pose2& v);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(Vector xi) const;
|
||||
static Matrix adjointMap(Vector v);
|
||||
Vector adjoint(Vector xi, Vector y);
|
||||
Vector adjointTranspose(Vector xi, Vector y);
|
||||
static Matrix adjointMap_(Vector v);
|
||||
static Vector adjoint_(Vector xi, Vector y);
|
||||
static Vector adjointTranspose(Vector xi, Vector y);
|
||||
static Matrix wedge(double vx, double vy, double w);
|
||||
|
||||
// Group Actions on Point2
|
||||
|
@ -628,8 +677,8 @@ class Pose3 {
|
|||
static Vector Logmap(const gtsam::Pose3& pose);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(Vector xi) const;
|
||||
static Matrix adjointMap(Vector xi);
|
||||
static Vector adjoint(Vector xi, Vector y);
|
||||
static Matrix adjointMap_(Vector xi);
|
||||
static Vector adjoint_(Vector xi, Vector y);
|
||||
static Vector adjointTranspose(Vector xi, Vector y);
|
||||
static Matrix ExpmapDerivative(Vector xi);
|
||||
static Matrix LogmapDerivative(const gtsam::Pose3& xi);
|
||||
|
@ -1218,14 +1267,30 @@ class VariableIndex {
|
|||
namespace noiseModel {
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
virtual class Base {
|
||||
void print(string s) const;
|
||||
// Methods below are available for all noise models. However, can't add them
|
||||
// because wrap (incorrectly) thinks robust classes derive from this Base as well.
|
||||
// bool isConstrained() const;
|
||||
// bool isUnit() const;
|
||||
// size_t dim() const;
|
||||
// Vector sigmas() const;
|
||||
};
|
||||
|
||||
virtual class Gaussian : gtsam::noiseModel::Base {
|
||||
static gtsam::noiseModel::Gaussian* SqrtInformation(Matrix R);
|
||||
static gtsam::noiseModel::Gaussian* Covariance(Matrix R);
|
||||
Matrix R() const;
|
||||
|
||||
bool equals(gtsam::noiseModel::Base& expected, double tol);
|
||||
void print(string s) const;
|
||||
|
||||
// access to noise model
|
||||
Matrix R() const;
|
||||
Matrix information() const;
|
||||
Matrix covariance() const;
|
||||
|
||||
// Whitening operations
|
||||
Vector whiten(Vector v) const;
|
||||
Vector unwhiten(Vector v) const;
|
||||
Matrix Whiten(Matrix H) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
@ -1236,7 +1301,11 @@ virtual class Diagonal : gtsam::noiseModel::Gaussian {
|
|||
static gtsam::noiseModel::Diagonal* Variances(Vector variances);
|
||||
static gtsam::noiseModel::Diagonal* Precisions(Vector precisions);
|
||||
Matrix R() const;
|
||||
void print(string s) const;
|
||||
|
||||
// access to noise model
|
||||
Vector sigmas() const;
|
||||
Vector invsigmas() const;
|
||||
Vector precisions() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
@ -1263,7 +1332,9 @@ virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
|||
static gtsam::noiseModel::Isotropic* Sigma(size_t dim, double sigma);
|
||||
static gtsam::noiseModel::Isotropic* Variance(size_t dim, double variance);
|
||||
static gtsam::noiseModel::Isotropic* Precision(size_t dim, double precision);
|
||||
void print(string s) const;
|
||||
|
||||
// access to noise model
|
||||
double sigma() const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
@ -1271,7 +1342,6 @@ virtual class Isotropic : gtsam::noiseModel::Diagonal {
|
|||
|
||||
virtual class Unit : gtsam::noiseModel::Isotropic {
|
||||
static gtsam::noiseModel::Unit* Create(size_t dim);
|
||||
void print(string s) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
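A hedged sketch of constructing the wrapped noise models declared above from Python, using the flattened module names that appear elsewhere in this diff (gtsam.noiseModel_Diagonal, gtsam.noiseModel_Isotropic, gtsam.noiseModel_Unit); the dimensions and sigmas are illustrative:

import numpy as np

import gtsam

# Diagonal: one standard deviation per dimension (e.g. x, y, theta).
diagonal = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
# Isotropic: a single sigma shared by all dimensions.
isotropic = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
# Unit: identity covariance of the given dimension.
unit = gtsam.noiseModel_Unit.Create(6)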
@ -1279,11 +1349,11 @@ virtual class Unit : gtsam::noiseModel::Isotropic {
|
|||
|
||||
namespace mEstimator {
|
||||
virtual class Base {
|
||||
void print(string s) const;
|
||||
};
|
||||
|
||||
virtual class Null: gtsam::noiseModel::mEstimator::Base {
|
||||
Null();
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Null* Create();
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@ -1292,7 +1362,6 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
|
|||
|
||||
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
||||
Fair(double c);
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Fair* Create(double c);
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@ -1301,7 +1370,6 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
|
|||
|
||||
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
||||
Huber(double k);
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Huber* Create(double k);
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@ -1310,7 +1378,6 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
|
|||
|
||||
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
|
||||
Tukey(double k);
|
||||
void print(string s) const;
|
||||
static gtsam::noiseModel::mEstimator::Tukey* Create(double k);
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@@ -1322,7 +1389,6 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
virtual class Robust : gtsam::noiseModel::Base {
  Robust(const gtsam::noiseModel::mEstimator::Base* robust, const gtsam::noiseModel::Base* noise);
  static gtsam::noiseModel::Robust* Create(const gtsam::noiseModel::mEstimator::Base* robust, const gtsam::noiseModel::Base* noise);
  void print(string s) const;

  // enabling serialization functionality
  void serializable() const;
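As a usage sketch (not part of the diff), the wrapped robust noise-model API above composes an m-estimator with a base model; the sigma and Huber threshold below are arbitrary example values:

    #include <gtsam/linear/NoiseModel.h>

    gtsam::SharedNoiseModel makeRobustModel() {
      using namespace gtsam;
      // Isotropic base model: 3-dimensional error, sigma = 0.1
      auto base = noiseModel::Isotropic::Sigma(3, 0.1);
      // Huber m-estimator (threshold 1.345) down-weights outlier residuals
      return noiseModel::Robust::Create(
          noiseModel::mEstimator::Huber::Create(1.345), base);
    }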
@ -1713,7 +1779,7 @@ virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
|
|||
|
||||
virtual class SubgraphSolver {
|
||||
SubgraphSolver(const gtsam::GaussianFactorGraph &A, const gtsam::SubgraphSolverParameters ¶meters, const gtsam::Ordering& ordering);
|
||||
SubgraphSolver(const gtsam::GaussianFactorGraph &Ab1, const gtsam::GaussianFactorGraph &Ab2, const gtsam::SubgraphSolverParameters ¶meters, const gtsam::Ordering& ordering);
|
||||
SubgraphSolver(const gtsam::GaussianFactorGraph &Ab1, const gtsam::GaussianFactorGraph* Ab2, const gtsam::SubgraphSolverParameters ¶meters, const gtsam::Ordering& ordering);
|
||||
gtsam::VectorValues optimize() const;
|
||||
};
|
||||
|
||||
|
@ -2002,10 +2068,12 @@ virtual class NonlinearOptimizerParams {
|
|||
void setVerbosity(string s);
|
||||
|
||||
string getLinearSolverType() const;
|
||||
|
||||
void setLinearSolverType(string solver);
|
||||
void setOrdering(const gtsam::Ordering& ordering);
|
||||
|
||||
void setIterativeParams(gtsam::IterativeOptimizationParameters* params);
|
||||
void setOrdering(const gtsam::Ordering& ordering);
|
||||
string getOrderingType() const;
|
||||
void setOrderingType(string ordering);
|
||||
|
||||
bool isMultifrontal() const;
|
||||
bool isSequential() const;
|
||||
|
@@ -2026,15 +2094,32 @@ virtual class GaussNewtonParams : gtsam::NonlinearOptimizerParams {
virtual class LevenbergMarquardtParams : gtsam::NonlinearOptimizerParams {
  LevenbergMarquardtParams();

  double getlambdaInitial() const;
  bool getDiagonalDamping() const;
  double getlambdaFactor() const;
  double getlambdaInitial() const;
  double getlambdaLowerBound() const;
  double getlambdaUpperBound() const;
  bool getUseFixedLambdaFactor();
  string getLogFile() const;
  string getVerbosityLM() const;

  void setlambdaInitial(double value);
  void setDiagonalDamping(bool flag);
  void setlambdaFactor(double value);
  void setlambdaInitial(double value);
  void setlambdaLowerBound(double value);
  void setlambdaUpperBound(double value);
  void setUseFixedLambdaFactor(bool flag);
  void setLogFile(string s);
  void setVerbosityLM(string s);

  static gtsam::LevenbergMarquardtParams LegacyDefaults();
  static gtsam::LevenbergMarquardtParams CeresDefaults();

  static gtsam::LevenbergMarquardtParams EnsureHasOrdering(
      gtsam::LevenbergMarquardtParams params,
      const gtsam::NonlinearFactorGraph& graph);
  static gtsam::LevenbergMarquardtParams ReplaceOrdering(
      gtsam::LevenbergMarquardtParams params, const gtsam::Ordering& ordering);
};

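For orientation (not part of the diff), these parameters are consumed by the Levenberg-Marquardt optimizer; `graph` and `initial` below are placeholders for a populated factor graph and initial estimate:

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/Values.h>

    gtsam::Values optimizeSketch(const gtsam::NonlinearFactorGraph& graph,
                                 const gtsam::Values& initial) {
      // Start from the Ceres-style defaults exposed above and adjust a few knobs
      gtsam::LevenbergMarquardtParams params = gtsam::LevenbergMarquardtParams::CeresDefaults();
      params.setlambdaInitial(1e-4);     // initial damping
      params.setVerbosityLM("SUMMARY");  // one line of output per iteration
      gtsam::LevenbergMarquardtOptimizer optimizer(graph, initial, params);
      return optimizer.optimize();
    }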
#include <gtsam/nonlinear/DoglegOptimizer.h>
|
||||
|
@ -2173,8 +2258,6 @@ class ISAM2Result {
|
|||
size_t getCliques() const;
|
||||
};
|
||||
|
||||
class FactorIndices {};
|
||||
|
||||
class ISAM2 {
|
||||
ISAM2();
|
||||
ISAM2(const gtsam::ISAM2Params& params);
|
||||
|
@ -2480,6 +2563,36 @@ void save2D(const gtsam::NonlinearFactorGraph& graph,
|
|||
const gtsam::Values& config, gtsam::noiseModel::Diagonal* model,
|
||||
string filename);
|
||||
|
||||
// std::vector<gtsam::BetweenFactor<Pose3>::shared_ptr>
|
||||
class BetweenFactorPose3s
|
||||
{
|
||||
size_t size() const;
|
||||
gtsam::BetweenFactorPose3* at(size_t i) const;
|
||||
};
|
||||
|
||||
#include <gtsam/slam/InitializePose3.h>
|
||||
class InitializePose3 {
|
||||
static gtsam::Values computeOrientationsChordal(
|
||||
const gtsam::NonlinearFactorGraph& pose3Graph);
|
||||
static gtsam::Values computeOrientationsGradient(
|
||||
const gtsam::NonlinearFactorGraph& pose3Graph,
|
||||
const gtsam::Values& givenGuess, size_t maxIter, const bool setRefFrame);
|
||||
static gtsam::Values computeOrientationsGradient(
|
||||
const gtsam::NonlinearFactorGraph& pose3Graph,
|
||||
const gtsam::Values& givenGuess);
|
||||
static gtsam::NonlinearFactorGraph buildPose3graph(
|
||||
const gtsam::NonlinearFactorGraph& graph);
|
||||
static gtsam::Values initializeOrientations(
|
||||
const gtsam::NonlinearFactorGraph& graph);
|
||||
static gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph,
|
||||
const gtsam::Values& givenGuess,
|
||||
bool useGradient);
|
||||
static gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph);
|
||||
};
|
||||
|
||||
gtsam::BetweenFactorPose3s parse3DFactors(string filename);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> load3D(string filename);
|
||||
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename);
|
||||
pair<gtsam::NonlinearFactorGraph*, gtsam::Values*> readG2o(string filename, bool is3D);
|
||||
void writeG2o(const gtsam::NonlinearFactorGraph& graph,
|
||||
|
|
|
@ -101,16 +101,67 @@ message(STATUS "Building GTSAM - shared: ${BUILD_SHARED_LIBS}")
|
|||
|
||||
# BUILD_SHARED_LIBS automatically defines static/shared libs:
|
||||
add_library(gtsam ${gtsam_srcs})
|
||||
# Boost:
|
||||
target_link_libraries(gtsam PUBLIC ${GTSAM_BOOST_LIBRARIES})
|
||||
target_include_directories(gtsam PUBLIC ${Boost_INCLUDE_DIR})
|
||||
# Other deps:
|
||||
target_link_libraries(gtsam PUBLIC ${GTSAM_ADDITIONAL_LIBRARIES})
|
||||
target_compile_definitions(gtsam PUBLIC ${GTSAM_COMPILE_DEFINITIONS})
|
||||
target_compile_options(gtsam PUBLIC ${GTSAM_COMPILE_OPTIONS})
|
||||
target_compile_definitions(gtsam PRIVATE ${GTSAM_COMPILE_DEFINITIONS_PRIVATE})
|
||||
target_compile_definitions(gtsam PUBLIC ${GTSAM_COMPILE_DEFINITIONS_PUBLIC})
|
||||
if (NOT "${GTSAM_COMPILE_OPTIONS_PUBLIC}" STREQUAL "")
|
||||
target_compile_options(gtsam PUBLIC ${GTSAM_COMPILE_OPTIONS_PUBLIC})
|
||||
endif()
|
||||
target_compile_options(gtsam PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE})
|
||||
set_target_properties(gtsam PROPERTIES
|
||||
OUTPUT_NAME gtsam
|
||||
CLEAN_DIRECT_OUTPUT 1
|
||||
VERSION ${gtsam_version}
|
||||
SOVERSION ${gtsam_soversion})
|
||||
|
||||
# Append Eigen include path, set in top-level CMakeLists.txt to either
|
||||
# system-eigen, or GTSAM eigen path
|
||||
target_include_directories(gtsam PUBLIC
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
# MKL include dir:
|
||||
if (GTSAM_USE_EIGEN_MKL)
|
||||
target_include_directories(gtsam PUBLIC ${MKL_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if(GTSAM_USE_TBB)
|
||||
target_include_directories(gtsam PUBLIC ${TBB_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
# Add includes for source directories 'BEFORE' boost and any system include
|
||||
# paths so that the compiler uses GTSAM headers in our source directory instead
|
||||
# of any previously installed GTSAM headers.
|
||||
target_include_directories(gtsam BEFORE PUBLIC
|
||||
# SuiteSparse_config
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/SuiteSparse_config>
|
||||
# CCOLAMD
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/CCOLAMD>
|
||||
# main gtsam includes:
|
||||
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}>
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/>
|
||||
# config.h
|
||||
$<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
|
||||
# unit tests:
|
||||
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/CppUnitLite>
|
||||
)
|
||||
if(GTSAM_SUPPORT_NESTED_DISSECTION)
|
||||
target_include_directories(gtsam BEFORE PUBLIC
|
||||
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/gtsam/3rdparty/metis/include>
|
||||
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
|
||||
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
|
||||
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/metis/>
|
||||
)
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
if(WIN32) # Add 'lib' prefix to static library to avoid filename collision with shared library
|
||||
if (NOT BUILD_SHARED_LIBS)
|
||||
set_target_properties(gtsam PROPERTIES
|
||||
|
|
|
@ -18,9 +18,9 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <cstdlib> // Provides size_t
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <cstdlib> // Provides size_t
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -29,11 +29,9 @@ namespace gtsam {
|
|||
* Uses rank compression and union by rank, iterator version
|
||||
* @addtogroup base
|
||||
*/
|
||||
template<class KEY>
|
||||
template <class KEY>
|
||||
class DSFMap {
|
||||
|
||||
protected:
|
||||
|
||||
protected:
|
||||
/// We store the forest in an STL map, but parents are done with pointers
|
||||
struct Entry {
|
||||
typename std::map<KEY, Entry>::iterator parent_;
|
||||
|
@ -41,7 +39,7 @@ protected:
|
|||
Entry() {}
|
||||
};
|
||||
|
||||
typedef typename std::map<KEY, Entry> Map;
|
||||
typedef typename std::map<KEY, Entry> Map;
|
||||
typedef typename Map::iterator iterator;
|
||||
mutable Map entries_;
|
||||
|
||||
|
@ -62,8 +60,7 @@ protected:
|
|||
iterator find_(const iterator& it) const {
|
||||
// follow parent pointers until we reach set representative
|
||||
iterator& parent = it->second.parent_;
|
||||
if (parent != it)
|
||||
parent = find_(parent); // not yet, recurse!
|
||||
if (parent != it) parent = find_(parent); // not yet, recurse!
|
||||
return parent;
|
||||
}
|
||||
|
||||
|
@ -73,13 +70,11 @@ protected:
|
|||
return find_(initial);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
public:
|
||||
typedef std::set<KEY> Set;
|
||||
|
||||
/// constructor
|
||||
DSFMap() {
|
||||
}
|
||||
DSFMap() {}
|
||||
|
||||
/// Given key, find the representative key for the set in which it lives
|
||||
inline KEY find(const KEY& key) const {
|
||||
|
@ -89,12 +84,10 @@ public:
|
|||
|
||||
/// Merge two sets
|
||||
void merge(const KEY& x, const KEY& y) {
|
||||
|
||||
// straight from http://en.wikipedia.org/wiki/Disjoint-set_data_structure
|
||||
iterator xRoot = find_(x);
|
||||
iterator yRoot = find_(y);
|
||||
if (xRoot == yRoot)
|
||||
return;
|
||||
if (xRoot == yRoot) return;
|
||||
|
||||
// Merge sets
|
||||
if (xRoot->second.rank_ < yRoot->second.rank_)
|
||||
|
@ -117,7 +110,14 @@ public:
|
|||
}
|
||||
return sets;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
/// Small utility class for representing a wrappable pairs of ints.
|
||||
class IndexPair : public std::pair<size_t,size_t> {
|
||||
public:
|
||||
IndexPair(): std::pair<size_t,size_t>(0,0) {}
|
||||
IndexPair(size_t i, size_t j) : std::pair<size_t,size_t>(i,j) {}
|
||||
inline size_t i() const { return first; };
|
||||
inline size_t j() const { return second; };
|
||||
};
|
||||
} // namespace gtsam
|
|
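As a brief illustration (not part of the diff), the IndexPair type added above is meant to be usable as a DSFMap key, e.g. for clustering (image, feature) measurements; the indices below are arbitrary:

    #include <gtsam/base/DSFMap.h>

    void clusterSketch() {
      // Union-find over (image, feature) index pairs
      gtsam::DSFMap<gtsam::IndexPair> dsf;
      dsf.merge(gtsam::IndexPair(1, 2), gtsam::IndexPair(3, 4));  // same track
      dsf.merge(gtsam::IndexPair(3, 4), gtsam::IndexPair(5, 6));
      // All three measurements now share one representative
      gtsam::IndexPair rep = dsf.find(gtsam::IndexPair(5, 6));
      (void)rep;
    }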
@@ -67,28 +67,29 @@ struct FixedSizeMatrix {
}

/**
 * Numerically compute gradient of scalar function
 * @brief Numerically compute gradient of scalar function
 * @return n-dimensional gradient computed via central differencing
 * Class X is the input argument
 * The class X needs to have dim, expmap, logmap
 * int N is the dimension of the X input value if variable dimension type but known at test time
 */
template<class X>
typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<double(const X&)> h, const X& x,
    double delta = 1e-5) {

template <class X, int N = traits<X>::dimension>
typename Eigen::Matrix<double, N, 1> numericalGradient(
    boost::function<double(const X&)> h, const X& x, double delta = 1e-5) {
  double factor = 1.0 / (2.0 * delta);

  BOOST_STATIC_ASSERT_MSG(
      (boost::is_base_of<manifold_tag, typename traits<X>::structure_category>::value),
      "Template argument X must be a manifold type.");
  static const int N = traits<X>::dimension;
  BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");

  typedef typename traits<X>::TangentVector TangentX;
  BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");

  // Prepare a tangent vector to perturb x with, only works for fixed size
  TangentX d;
  typename traits<X>::TangentVector d;
  d.setZero();

  Eigen::Matrix<double,N,1> g; g.setZero(); // Can be fixed size
  Eigen::Matrix<double,N,1> g;
  g.setZero();
  for (int j = 0; j < N; j++) {
    d(j) = delta;
    double hxplus = h(traits<X>::Retract(x, d));
|
||||
|
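As an illustration (not taken from the diff), the revised numericalGradient template can be used like this; the Pose2-based scalar function and values are hypothetical:

    #include <gtsam/base/numericalDerivative.h>
    #include <gtsam/geometry/Pose2.h>
    #include <boost/function.hpp>

    // Scalar function on the Pose2 manifold
    double energy(const gtsam::Pose2& p) { return p.x() * p.x() + 2.0 * p.y(); }

    Eigen::Matrix<double, 3, 1> gradientAtPose() {
      boost::function<double(const gtsam::Pose2&)> h = energy;
      // 3-dimensional gradient by central differencing (N deduced from traits<Pose2>)
      return gtsam::numericalGradient<gtsam::Pose2>(h, gtsam::Pose2(1.0, 2.0, 0.3));
    }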
@ -108,37 +109,34 @@ typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<do
|
|||
* @param delta increment for numerical derivative
|
||||
* Class Y is the output argument
|
||||
* Class X is the input argument
|
||||
* int N is the dimension of the X input value if variable dimension type but known at test time
|
||||
* @return m*n Jacobian computed via central differencing
|
||||
*/
|
||||
|
||||
template<class Y, class X>
|
||||
template <class Y, class X, int N = traits<X>::dimension>
|
||||
// TODO Should compute fixed-size matrix
|
||||
typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::function<Y(const X&)> h, const X& x,
|
||||
double delta = 1e-5) {
|
||||
|
||||
typename internal::FixedSizeMatrix<Y, X>::type numericalDerivative11(
|
||||
boost::function<Y(const X&)> h, const X& x, double delta = 1e-5) {
|
||||
typedef typename internal::FixedSizeMatrix<Y,X>::type Matrix;
|
||||
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
|
||||
"Template argument Y must be a manifold type.");
|
||||
typedef traits<Y> TraitsY;
|
||||
typedef typename TraitsY::TangentVector TangentY;
|
||||
|
||||
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X>::structure_category>::value),
|
||||
"Template argument X must be a manifold type.");
|
||||
static const int N = traits<X>::dimension;
|
||||
BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");
|
||||
BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");
|
||||
typedef traits<X> TraitsX;
|
||||
typedef typename TraitsX::TangentVector TangentX;
|
||||
|
||||
// get value at x, and corresponding chart
|
||||
const Y hx = h(x);
|
||||
|
||||
// Bit of a hack for now to find number of rows
|
||||
const TangentY zeroY = TraitsY::Local(hx, hx);
|
||||
const typename TraitsY::TangentVector zeroY = TraitsY::Local(hx, hx);
|
||||
const size_t m = zeroY.size();
|
||||
|
||||
// Prepare a tangent vector to perturb x with, only works for fixed size
|
||||
TangentX dx;
|
||||
Eigen::Matrix<double, N, 1> dx;
|
||||
dx.setZero();
|
||||
|
||||
// Fill in Jacobian H
|
||||
|
@ -146,9 +144,9 @@ typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::funct
|
|||
const double factor = 1.0 / (2.0 * delta);
|
||||
for (int j = 0; j < N; j++) {
|
||||
dx(j) = delta;
|
||||
const TangentY dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
|
||||
const auto dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
|
||||
dx(j) = -delta;
|
||||
const TangentY dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
|
||||
const auto dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
|
||||
dx(j) = 0;
|
||||
H.col(j) << (dy1 - dy2) * factor;
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
* @brief unit tests for DSFMap
|
||||
*/
|
||||
|
||||
#include <gtsam_unstable/base/DSFMap.h>
|
||||
#include <gtsam/base/DSFMap.h>
|
||||
|
||||
#include <boost/assign/std/list.hpp>
|
||||
#include <boost/assign/std/set.hpp>
|
||||
|
@ -115,7 +115,6 @@ TEST(DSFMap, mergePairwiseMatches2) {
|
|||
TEST(DSFMap, sets){
|
||||
// Create some "matches"
|
||||
typedef pair<size_t,size_t> Match;
|
||||
typedef pair<size_t, set<size_t> > key_pair;
|
||||
list<Match> matches;
|
||||
matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6);
|
||||
|
||||
|
@ -140,6 +139,12 @@ TEST(DSFMap, sets){
|
|||
EXPECT(s2 == sets[4]);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DSFMap, findIndexPair) {
|
||||
DSFMap<IndexPair> dsf;
|
||||
EXPECT(dsf.find(IndexPair(1,2))==IndexPair(1,2));
|
||||
EXPECT(dsf.find(IndexPair(1,2)) != dsf.find(IndexPair(1,3)));
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
|
||||
/* ************************************************************************* */
|
|
@@ -56,6 +56,9 @@ namespace gtsam {
/// Integer nonlinear key type
typedef std::uint64_t Key;

/// Integer nonlinear factor index type
typedef std::uint64_t FactorIndex;

/// The index type for Eigen objects
typedef ptrdiff_t DenseIndex;
|
||||
|
||||
|
|
|
@@ -44,8 +44,7 @@ Cal3_S2::Cal3_S2(const std::string &path) :
  if (infile)
    infile >> fx_ >> fy_ >> s_ >> u0_ >> v0_;
  else {
    printf("Unable to load the calibration\n");
    exit(0);
    throw std::runtime_error("Cal3_S2: Unable to load the calibration");
  }

  infile.close();
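A small sketch (not part of the diff): with exit() replaced by an exception, callers can recover from a missing calibration file; the file name below is a placeholder:

    #include <gtsam/geometry/Cal3_S2.h>
    #include <iostream>
    #include <stdexcept>

    void loadCalibration() {
      try {
        gtsam::Cal3_S2 K("calibration.txt");  // hypothetical path
        K.print("K: ");
      } catch (const std::runtime_error& e) {
        std::cerr << "Using default intrinsics instead: " << e.what() << std::endl;
      }
    }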
|
||||
|
|
|
@@ -146,17 +146,21 @@ public:
  /**
   * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
   */
  Vector3 adjoint(const Vector3& xi, const Vector3& y) {
  static Vector3 adjoint(const Vector3& xi, const Vector3& y) {
    return adjointMap(xi) * y;
  }

  /**
   * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
   */
  Vector3 adjointTranspose(const Vector3& xi, const Vector3& y) {
  static Vector3 adjointTranspose(const Vector3& xi, const Vector3& y) {
    return adjointMap(xi).transpose() * y;
  }

  // temporary fix for wrappers until case issue is resolved
  static Matrix3 adjointMap_(const Vector3 &xi) { return adjointMap(xi);}
  static Vector3 adjoint_(const Vector3 &xi, const Vector3 &y) { return adjoint(xi, y);}

  /**
   * wedge for SE(2):
   * @param xi 3-dim twist (v,omega) where
|
||||
|
|
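As an aside (not part of the diff), making adjoint and adjointTranspose static lets them be called without a Pose2 instance; the twist values here are arbitrary:

    #include <gtsam/geometry/Pose2.h>

    void adjointDemo() {
      // Adjoint action of a twist xi on another Lie-algebra vector y
      gtsam::Vector3 xi(0.1, 0.2, 0.3), y(1.0, 0.0, 0.5);
      gtsam::Vector3 ad  = gtsam::Pose2::adjoint(xi, y);           // adjointMap(xi) * y
      gtsam::Vector3 adT = gtsam::Pose2::adjointTranspose(xi, y);  // adjointMap(xi)^T * y
      (void)ad; (void)adT;
    }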
|
@ -160,6 +160,10 @@ public:
|
|||
static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
|
||||
OptionalJacobian<6, 6> = boost::none);
|
||||
|
||||
// temporary fix for wrappers until case issue is resolved
|
||||
static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}
|
||||
static Vector6 adjoint_(const Vector6 &xi, const Vector6 &y) { return adjoint(xi, y);}
|
||||
|
||||
/**
|
||||
* The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
|
||||
*/
|
||||
|
|
|
@ -31,7 +31,6 @@ namespace so3 {
|
|||
void ExpmapFunctor::init(bool nearZeroApprox) {
|
||||
nearZero = nearZeroApprox || (theta2 <= std::numeric_limits<double>::epsilon());
|
||||
if (!nearZero) {
|
||||
theta = std::sqrt(theta2); // rotation angle
|
||||
sin_theta = std::sin(theta);
|
||||
const double s2 = std::sin(theta / 2.0);
|
||||
one_minus_cos = 2.0 * s2 * s2; // numerically better than [1 - cos(theta)]
|
||||
|
@ -39,7 +38,7 @@ void ExpmapFunctor::init(bool nearZeroApprox) {
|
|||
}
|
||||
|
||||
ExpmapFunctor::ExpmapFunctor(const Vector3& omega, bool nearZeroApprox)
|
||||
: theta2(omega.dot(omega)) {
|
||||
: theta2(omega.dot(omega)), theta(std::sqrt(theta2)) {
|
||||
const double wx = omega.x(), wy = omega.y(), wz = omega.z();
|
||||
W << 0.0, -wz, +wy, +wz, 0.0, -wx, -wy, +wx, 0.0;
|
||||
init(nearZeroApprox);
|
||||
|
@ -50,7 +49,7 @@ ExpmapFunctor::ExpmapFunctor(const Vector3& omega, bool nearZeroApprox)
|
|||
}
|
||||
|
||||
ExpmapFunctor::ExpmapFunctor(const Vector3& axis, double angle, bool nearZeroApprox)
|
||||
: theta2(angle * angle) {
|
||||
: theta2(angle * angle), theta(angle) {
|
||||
const double ax = axis.x(), ay = axis.y(), az = axis.z();
|
||||
K << 0.0, -az, +ay, +az, 0.0, -ax, -ay, +ax, 0.0;
|
||||
W = K * angle;
|
||||
|
|
|
@ -103,6 +103,20 @@ Rot3 slow_but_correct_Rodrigues(const Vector& w) {
|
|||
return Rot3(R);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, AxisAngle)
|
||||
{
|
||||
Vector axis = Vector3(0., 1., 0.); // rotation around Y
|
||||
double angle = 3.14 / 4.0;
|
||||
Rot3 expected(0.707388, 0, 0.706825,
|
||||
0, 1, 0,
|
||||
-0.706825, 0, 0.707388);
|
||||
Rot3 actual = Rot3::AxisAngle(axis, angle);
|
||||
CHECK(assert_equal(expected,actual,1e-5));
|
||||
Rot3 actual2 = Rot3::AxisAngle(axis, angle-2*M_PI);
|
||||
CHECK(assert_equal(expected,actual2,1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, Rodrigues)
|
||||
{
|
||||
|
|
|
@ -82,6 +82,34 @@ TEST(SO3, ChartDerivatives) {
|
|||
CHECK_CHART_DERIVATIVES(R2, R1);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SO3, Expmap) {
|
||||
Vector axis = Vector3(0., 1., 0.); // rotation around Y
|
||||
double angle = 3.14 / 4.0;
|
||||
Matrix expected(3,3);
|
||||
expected << 0.707388, 0, 0.706825, 0, 1, 0, -0.706825, 0, 0.707388;
|
||||
|
||||
// axis angle version
|
||||
so3::ExpmapFunctor f1(axis, angle);
|
||||
SO3 actual1 = f1.expmap();
|
||||
CHECK(assert_equal(expected, actual1.matrix(), 1e-5));
|
||||
|
||||
// axis angle version, negative angle
|
||||
so3::ExpmapFunctor f2(axis, angle - 2*M_PI);
|
||||
SO3 actual2 = f2.expmap();
|
||||
CHECK(assert_equal(expected, actual2.matrix(), 1e-5));
|
||||
|
||||
// omega version
|
||||
so3::ExpmapFunctor f3(axis * angle);
|
||||
SO3 actual3 = f3.expmap();
|
||||
CHECK(assert_equal(expected, actual3.matrix(), 1e-5));
|
||||
|
||||
// omega version, negative angle
|
||||
so3::ExpmapFunctor f4(axis * (angle - 2*M_PI));
|
||||
SO3 actual4 = f4.expmap();
|
||||
CHECK(assert_equal(expected, actual4.matrix(), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace exmap_derivative {
|
||||
static const Vector3 w(0.1, 0.27, -0.2);
|
||||
|
|
|
@ -28,6 +28,9 @@
|
|||
#include <gtsam/inference/Key.h>
|
||||
|
||||
namespace gtsam {
|
||||
/// Define collection types:
|
||||
typedef FastVector<FactorIndex> FactorIndices;
|
||||
typedef FastSet<FactorIndex> FactorIndexSet;
|
||||
|
||||
/**
|
||||
* This is the base class for all factor types. It is templated on a KEY type,
|
||||
|
|
|
@ -25,7 +25,7 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
template<class FG>
|
||||
void VariableIndex::augment(const FG& factors,
|
||||
boost::optional<const FastVector<size_t>&> newFactorIndices) {
|
||||
boost::optional<const FactorIndices&> newFactorIndices) {
|
||||
gttic(VariableIndex_augment);
|
||||
|
||||
// Augment index for each factor
|
||||
|
|
|
@ -35,8 +35,8 @@ void VariableIndex::print(const string& str, const KeyFormatter& keyFormatter) c
|
|||
cout << "nEntries = " << nEntries() << ", nFactors = " << nFactors() << "\n";
|
||||
for(KeyMap::value_type key_factors: index_) {
|
||||
cout << "var " << keyFormatter(key_factors.first) << ":";
|
||||
for(const size_t factor: key_factors.second)
|
||||
cout << " " << factor;
|
||||
for(const auto index: key_factors.second)
|
||||
cout << " " << index;
|
||||
cout << "\n";
|
||||
}
|
||||
cout.flush();
|
||||
|
@ -48,8 +48,8 @@ void VariableIndex::outputMetisFormat(ostream& os) const {
|
|||
// run over variables, which will be hyper-edges.
|
||||
for(KeyMap::value_type key_factors: index_) {
|
||||
// every variable is a hyper-edge covering its factors
|
||||
for(const size_t factor: key_factors.second)
|
||||
os << (factor+1) << " "; // base 1
|
||||
for(const auto index: key_factors.second)
|
||||
os << (index+1) << " "; // base 1
|
||||
os << "\n";
|
||||
}
|
||||
os << flush;
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/inference/Factor.h>
|
||||
#include <gtsam/inference/Key.h>
|
||||
#include <gtsam/base/FastMap.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
|
@ -43,7 +44,7 @@ class GTSAM_EXPORT VariableIndex {
|
|||
public:
|
||||
|
||||
typedef boost::shared_ptr<VariableIndex> shared_ptr;
|
||||
typedef FastVector<size_t> Factors;
|
||||
typedef FactorIndices Factors;
|
||||
typedef Factors::iterator Factor_iterator;
|
||||
typedef Factors::const_iterator Factor_const_iterator;
|
||||
|
||||
|
@ -63,7 +64,7 @@ public:
|
|||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/** Default constructor, creates an empty VariableIndex */
|
||||
/// Default constructor, creates an empty VariableIndex
|
||||
VariableIndex() : nFactors_(0), nEntries_(0) {}
|
||||
|
||||
/**
|
||||
|
@ -77,19 +78,16 @@ public:
|
|||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/**
|
||||
* The number of variable entries. This is one greater than the variable
|
||||
* with the highest index.
|
||||
*/
|
||||
/// The number of variable entries. This is equal to the number of unique variable Keys.
|
||||
size_t size() const { return index_.size(); }
|
||||
|
||||
/** The number of factors in the original factor graph */
|
||||
/// The number of factors in the original factor graph
|
||||
size_t nFactors() const { return nFactors_; }
|
||||
|
||||
/** The number of nonzero blocks, i.e. the number of variable-factor entries */
|
||||
/// The number of nonzero blocks, i.e. the number of variable-factor entries
|
||||
size_t nEntries() const { return nEntries_; }
|
||||
|
||||
/** Access a list of factors by variable */
|
||||
/// Access a list of factors by variable
|
||||
const Factors& operator[](Key variable) const {
|
||||
KeyMap::const_iterator item = index_.find(variable);
|
||||
if(item == index_.end())
|
||||
|
@ -102,10 +100,10 @@ public:
|
|||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/** Test for equality (for unit tests and debug assertions). */
|
||||
/// Test for equality (for unit tests and debug assertions).
|
||||
bool equals(const VariableIndex& other, double tol=0.0) const;
|
||||
|
||||
/** Print the variable index (for unit tests and debugging). */
|
||||
/// Print the variable index (for unit tests and debugging).
|
||||
void print(const std::string& str = "VariableIndex: ",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
||||
|
@ -125,7 +123,7 @@ public:
|
|||
* solving problems incrementally.
|
||||
*/
|
||||
template<class FG>
|
||||
void augment(const FG& factors, boost::optional<const FastVector<size_t>&> newFactorIndices = boost::none);
|
||||
void augment(const FG& factors, boost::optional<const FactorIndices&> newFactorIndices = boost::none);
|
||||
|
||||
/**
|
||||
* Remove entries corresponding to the specified factors. NOTE: We intentionally do not decrement
|
||||
|
@ -140,17 +138,17 @@ public:
|
|||
template<typename ITERATOR, class FG>
|
||||
void remove(ITERATOR firstFactor, ITERATOR lastFactor, const FG& factors);
|
||||
|
||||
/** Remove unused empty variables (in debug mode verifies they are empty). */
|
||||
/// Remove unused empty variables (in debug mode verifies they are empty).
|
||||
template<typename ITERATOR>
|
||||
void removeUnusedVariables(ITERATOR firstKey, ITERATOR lastKey);
|
||||
|
||||
/** Iterator to the first variable entry */
|
||||
/// Iterator to the first variable entry
|
||||
const_iterator begin() const { return index_.begin(); }
|
||||
|
||||
/** Iterator to the first variable entry */
|
||||
/// Iterator to the first variable entry
|
||||
const_iterator end() const { return index_.end(); }
|
||||
|
||||
/** Find the iterator for the requested variable entry */
|
||||
/// Find the iterator for the requested variable entry
|
||||
const_iterator find(Key key) const { return index_.find(key); }
|
||||
|
||||
protected:
|
||||
|
|
|
@@ -138,23 +138,34 @@ namespace gtsam {
//}

/* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix() const {
Ordering GaussianBayesNet::ordering() const {
  GaussianFactorGraph factorGraph(*this);
  KeySet keys = factorGraph.keys();
  auto keys = factorGraph.keys();
  // add frontal keys in order
  Ordering ordering;
  for (const sharedConditional& cg: *this)
  for (const sharedConditional& cg : *this)
    if (cg) {
      for (Key key: cg->frontals()) {
      for (Key key : cg->frontals()) {
        ordering.push_back(key);
        keys.erase(key);
      }
    }
  // add remaining keys in case Bayes net is incomplete
  for (Key key: keys)
    ordering.push_back(key);
  // return matrix and RHS
  return factorGraph.jacobian(ordering);
  for (Key key : keys) ordering.push_back(key);
  return ordering;
}

/* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix(boost::optional<const Ordering&> ordering) const {
  if (ordering) {
    // Convert to a GaussianFactorGraph and use its machinery
    GaussianFactorGraph factorGraph(*this);
    return factorGraph.jacobian(ordering);
  } else {
    // recursively call with default ordering
    const auto defaultOrdering = this->ordering();
    return matrix(defaultOrdering);
  }
}

///* ************************************************************************* */
|
||||
|
|
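A short sketch of this new API (not part of the diff): ordering() gives a topological ordering and matrix() uses it by default, so the returned R is upper-triangular; `bayesNet` stands for an existing GaussianBayesNet:

    #include <gtsam/linear/GaussianBayesNet.h>
    #include <tuple>

    void denseView(const gtsam::GaussianBayesNet& bayesNet) {
      // Topological ordering that makes the dense matrix upper-triangular
      const gtsam::Ordering ordering = bayesNet.ordering();

      gtsam::Matrix R;
      gtsam::Vector d;
      std::tie(R, d) = bayesNet.matrix();          // uses ordering() internally
      std::tie(R, d) = bayesNet.matrix(ordering);  // equivalent, with explicit ordering
    }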
|
@@ -74,6 +74,14 @@ namespace gtsam {
    /// Version of optimize for incomplete BayesNet, needs solution for missing variables
    VectorValues optimize(const VectorValues& solutionForMissing) const;

    /**
     * Return ordering corresponding to a topological sort.
     * There are many topological sorts of a Bayes net. This one
     * corresponds to the one that makes 'matrix' below upper-triangular.
     * In case the Bayes net is incomplete, any non-frontal keys are added to the end.
     */
    Ordering ordering() const;

    ///@}

    ///@name Linear Algebra

@@ -81,8 +89,10 @@

    /**
     * Return (dense) upper-triangular matrix representation
     * Will return an upper-triangular matrix only when using 'ordering' above.
     * In case the Bayes net is incomplete, zero columns are added to the end.
     */
    std::pair<Matrix, Vector> matrix() const;
    std::pair<Matrix, Vector> matrix(boost::optional<const Ordering&> ordering = boost::none) const;

    /**
     * Optimize along the gradient direction, with a closed-form computation to perform the line
|
||||
|
|
|
@ -134,7 +134,7 @@ namespace gtsam {
|
|||
VectorValues result;
|
||||
DenseIndex vectorPosition = 0;
|
||||
for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
|
||||
result.insert(*frontal, solution.segment(vectorPosition, getDim(frontal)));
|
||||
result.emplace(*frontal, solution.segment(vectorPosition, getDim(frontal)));
|
||||
vectorPosition += getDim(frontal);
|
||||
}
|
||||
|
||||
|
@ -162,7 +162,7 @@ namespace gtsam {
|
|||
VectorValues result;
|
||||
DenseIndex vectorPosition = 0;
|
||||
for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
|
||||
result.insert(*frontal, soln.segment(vectorPosition, getDim(frontal)));
|
||||
result.emplace(*frontal, soln.segment(vectorPosition, getDim(frontal)));
|
||||
vectorPosition += getDim(frontal);
|
||||
}
|
||||
|
||||
|
@ -172,13 +172,13 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const {
|
||||
Vector frontalVec = gy.vector(KeyVector(beginFrontals(), endFrontals()));
|
||||
frontalVec = gtsam::backSubstituteUpper(frontalVec, Matrix(get_R()));
|
||||
frontalVec = get_R().transpose().triangularView<Eigen::Lower>().solve(frontalVec);
|
||||
|
||||
// Check for indeterminant solution
|
||||
if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front());
|
||||
|
||||
for (const_iterator it = beginParents(); it!= endParents(); it++)
|
||||
gy[*it] += -1.0 * Matrix(getA(it)).transpose() * frontalVec;
|
||||
gy[*it].noalias() += -1.0 * getA(it).transpose() * frontalVec;
|
||||
|
||||
// Scale by sigmas
|
||||
if (model_)
|
||||
|
|
|
@ -61,7 +61,7 @@ namespace gtsam {
|
|||
for (GaussianFactor::const_iterator it = gf->begin(); it != gf->end(); it++) {
|
||||
map<Key,size_t>::iterator it2 = spec.find(*it);
|
||||
if ( it2 == spec.end() ) {
|
||||
spec.insert(make_pair(*it, gf->getDim(it)));
|
||||
spec.emplace(*it, gf->getDim(it));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -246,7 +246,7 @@ namespace gtsam {
|
|||
if (blocks.count(j))
|
||||
blocks[j] += Bj;
|
||||
else
|
||||
blocks.insert(make_pair(j,Bj));
|
||||
blocks.emplace(j,Bj);
|
||||
}
|
||||
}
|
||||
return blocks;
|
||||
|
|
|
@ -305,7 +305,7 @@ Matrix HessianFactor::information() const {
|
|||
VectorValues HessianFactor::hessianDiagonal() const {
|
||||
VectorValues d;
|
||||
for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
|
||||
d.insert(keys_[j], info_.diagonal(j));
|
||||
d.emplace(keys_[j], info_.diagonal(j));
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
@ -436,7 +436,7 @@ VectorValues HessianFactor::gradientAtZero() const {
|
|||
VectorValues g;
|
||||
size_t n = size();
|
||||
for (size_t j = 0; j < n; ++j)
|
||||
g.insert(keys_[j], -info_.aboveDiagonalBlock(j, n));
|
||||
g.emplace(keys_[j], -info_.aboveDiagonalBlock(j, n));
|
||||
return g;
|
||||
}
|
||||
|
||||
|
@ -513,7 +513,7 @@ VectorValues HessianFactor::solve() {
|
|||
std::size_t offset = 0;
|
||||
for (DenseIndex j = 0; j < (DenseIndex)n; ++j) {
|
||||
const DenseIndex dim = info_.getDim(j);
|
||||
delta.insert(keys_[j], solution.segment(offset, dim));
|
||||
delta.emplace(keys_[j], solution.segment(offset, dim));
|
||||
offset += dim;
|
||||
}
|
||||
|
||||
|
|
|
@ -117,7 +117,7 @@ void KeyInfo::initialize(const GaussianFactorGraph &fg) {
|
|||
for (size_t i = 0; i < n; ++i) {
|
||||
const size_t key = ordering_[i];
|
||||
const size_t dim = colspec.find(key)->second;
|
||||
insert(make_pair(key, KeyInfoEntry(i, dim, start)));
|
||||
this->emplace(key, KeyInfoEntry(i, dim, start));
|
||||
start += dim;
|
||||
}
|
||||
numCols_ = start;
|
||||
|
@ -126,8 +126,8 @@ void KeyInfo::initialize(const GaussianFactorGraph &fg) {
|
|||
/****************************************************************************/
|
||||
vector<size_t> KeyInfo::colSpec() const {
|
||||
std::vector<size_t> result(size(), 0);
|
||||
for ( const KeyInfo::value_type &item: *this ) {
|
||||
result[item.second.index()] = item.second.dim();
|
||||
for ( const auto &item: *this ) {
|
||||
result[item.second.index] = item.second.dim;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@ -135,8 +135,8 @@ vector<size_t> KeyInfo::colSpec() const {
|
|||
/****************************************************************************/
|
||||
VectorValues KeyInfo::x0() const {
|
||||
VectorValues result;
|
||||
for ( const KeyInfo::value_type &item: *this ) {
|
||||
result.insert(item.first, Vector::Zero(item.second.dim()));
|
||||
for ( const auto &item: *this ) {
|
||||
result.emplace(item.first, Vector::Zero(item.second.dim));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -32,8 +32,8 @@
|
|||
namespace gtsam {
|
||||
|
||||
// Forward declarations
|
||||
struct KeyInfoEntry;
|
||||
class KeyInfo;
|
||||
class KeyInfoEntry;
|
||||
class GaussianFactorGraph;
|
||||
class Values;
|
||||
class VectorValues;
|
||||
|
@@ -109,27 +109,14 @@ public:

/**
 * Handy data structure for iterative solvers
 * key to (index, dimension, colstart)
 * key to (index, dimension, start)
 */
class GTSAM_EXPORT KeyInfoEntry: public boost::tuple<Key, size_t, Key> {

public:

  typedef boost::tuple<Key, size_t, Key> Base;

struct GTSAM_EXPORT KeyInfoEntry {
  size_t index, dim, start;
  KeyInfoEntry() {
  }
  KeyInfoEntry(size_t idx, size_t d, Key start) :
      Base(idx, d, start) {
  }
  size_t index() const {
    return this->get<0>();
  }
  size_t dim() const {
    return this->get<1>();
  }
  size_t colstart() const {
    return this->get<2>();
      index(idx), dim(d), start(start) {
  }
};
|
||||
|
||||
|
|
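A brief note (not part of the diff): with KeyInfoEntry now a plain struct, the tuple-style accessors become public fields; a hypothetical lookup in a KeyInfo map changes accordingly:

    #include <gtsam/linear/IterativeSolver.h>

    void copySegment(const gtsam::KeyInfo& keyInfo, gtsam::Key key,
                     const gtsam::Vector& full, gtsam::Vector& segment) {
      const gtsam::KeyInfoEntry& entry = keyInfo.find(key)->second;
      // Previously entry.colstart() and entry.dim(); now plain data members
      segment = full.segment(entry.start, entry.dim);
    }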
|
@ -102,10 +102,9 @@ JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const HessianFactor& factor) :
|
||||
Base(factor), Ab_(
|
||||
VerticalBlockMatrix::LikeActiveViewOf(factor.info(),
|
||||
factor.rows())) {
|
||||
JacobianFactor::JacobianFactor(const HessianFactor& factor)
|
||||
: Base(factor),
|
||||
Ab_(VerticalBlockMatrix::LikeActiveViewOf(factor.info(), factor.rows())) {
|
||||
// Copy Hessian into our matrix and then do in-place Cholesky
|
||||
Ab_.full() = factor.info().selfadjointView();
|
||||
|
||||
|
@ -114,16 +113,19 @@ JacobianFactor::JacobianFactor(const HessianFactor& factor) :
|
|||
bool success;
|
||||
boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix());
|
||||
|
||||
// Check for indefinite system
|
||||
if (!success)
|
||||
// Check that Cholesky succeeded OR it managed to factor the full Hessian.
|
||||
// THe latter case occurs with non-positive definite matrices arising from QP.
|
||||
if (success || maxrank == factor.rows() - 1) {
|
||||
// Zero out lower triangle
|
||||
Ab_.matrix().topRows(maxrank).triangularView<Eigen::StrictlyLower>() =
|
||||
Matrix::Zero(maxrank, Ab_.matrix().cols());
|
||||
// FIXME: replace with triangular system
|
||||
Ab_.rowEnd() = maxrank;
|
||||
model_ = SharedDiagonal(); // is equivalent to Unit::Create(maxrank)
|
||||
} else {
|
||||
// indefinite system
|
||||
throw IndeterminantLinearSystemException(factor.keys().front());
|
||||
|
||||
// Zero out lower triangle
|
||||
Ab_.matrix().topRows(maxrank).triangularView<Eigen::StrictlyLower>() =
|
||||
Matrix::Zero(maxrank, Ab_.matrix().cols());
|
||||
// FIXME: replace with triangular system
|
||||
Ab_.rowEnd() = maxrank;
|
||||
model_ = SharedDiagonal(); // should be same as Unit::Create(maxrank);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -164,9 +166,10 @@ boost::tuple<FastVector<DenseIndex>, DenseIndex, DenseIndex> _countDims(
|
|||
n += vardim;
|
||||
} else {
|
||||
if(!(varDims[jointVarpos] == vardim)) {
|
||||
cout << "Factor " << sourceFactorI << " variable " << DefaultKeyFormatter(sourceFactor.keys()[sourceVarpos]) <<
|
||||
" has different dimensionality of " << vardim << " instead of " << varDims[jointVarpos] << endl;
|
||||
exit(1);
|
||||
std::stringstream ss;
|
||||
ss << "Factor " << sourceFactorI << " variable " << DefaultKeyFormatter(sourceFactor.keys()[sourceVarpos]) <<
|
||||
" has different dimensionality of " << vardim << " instead of " << varDims[jointVarpos];
|
||||
throw std::runtime_error(ss.str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
|
@ -429,10 +432,9 @@ Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::error_vector(const VectorValues& c) const {
|
||||
if (model_)
|
||||
return model_->whiten(unweighted_error(c));
|
||||
else
|
||||
return unweighted_error(c);
|
||||
Vector e = unweighted_error(c);
|
||||
if (model_) model_->whitenInPlace(e);
|
||||
return e;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -473,10 +475,10 @@ VectorValues JacobianFactor::hessianDiagonal() const {
|
|||
for (size_t k = 0; k < nj; ++k) {
|
||||
Vector column_k = Ab_(pos).col(k);
|
||||
if (model_)
|
||||
column_k = model_->whiten(column_k);
|
||||
model_->whitenInPlace(column_k);
|
||||
dj(k) = dot(column_k, column_k);
|
||||
}
|
||||
d.insert(j, dj);
|
||||
d.emplace(j, dj);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
@ -495,7 +497,7 @@ map<Key, Matrix> JacobianFactor::hessianBlockDiagonal() const {
|
|||
Matrix Aj = Ab_(pos);
|
||||
if (model_)
|
||||
Aj = model_->Whiten(Aj);
|
||||
blocks.insert(make_pair(j, Aj.transpose() * Aj));
|
||||
blocks.emplace(j, Aj.transpose() * Aj);
|
||||
}
|
||||
return blocks;
|
||||
}
|
||||
|
@ -540,29 +542,38 @@ void JacobianFactor::updateHessian(const KeyVector& infoKeys,
|
|||
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::operator*(const VectorValues& x) const {
|
||||
Vector Ax = Vector::Zero(Ab_.rows());
|
||||
Vector Ax(Ab_.rows());
|
||||
Ax.setZero();
|
||||
if (empty())
|
||||
return Ax;
|
||||
|
||||
// Just iterate over all A matrices and multiply in correct config part
|
||||
for (size_t pos = 0; pos < size(); ++pos)
|
||||
Ax += Ab_(pos) * x[keys_[pos]];
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
// http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
|
||||
Ax.noalias() += Ab_(pos) * x[keys_[pos]];
|
||||
}
|
||||
|
||||
return model_ ? model_->whiten(Ax) : Ax;
|
||||
if (model_) model_->whitenInPlace(Ax);
|
||||
return Ax;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
|
||||
VectorValues& x) const {
|
||||
Vector E = alpha * (model_ ? model_->whiten(e) : e);
|
||||
VectorValues& x) const {
|
||||
Vector E(e.size());
|
||||
E.noalias() = alpha * e;
|
||||
if (model_) model_->whitenInPlace(E);
|
||||
// Just iterate over all A matrices and insert Ai^e into VectorValues
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
// Create the value as a zero vector if it does not exist.
|
||||
pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector());
|
||||
if (xi.second)
|
||||
xi.first->second = Vector::Zero(getDim(begin() + pos));
|
||||
xi.first->second += Ab_(pos).transpose()*E;
|
||||
const Key j = keys_[pos];
|
||||
// To avoid another malloc if key exists, we explicitly check
|
||||
auto it = x.find(j);
|
||||
if (it != x.end()) {
|
||||
// http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
|
||||
it->second.noalias() += Ab_(pos).transpose() * E;
|
||||
} else {
|
||||
x.emplace(j, Ab_(pos).transpose() * E);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -624,8 +635,8 @@ VectorValues JacobianFactor::gradientAtZero() const {
|
|||
Vector b = getb();
|
||||
// Gradient is really -A'*b / sigma^2
|
||||
// transposeMultiplyAdd will divide by sigma once, so we need one more
|
||||
Vector b_sigma = model_ ? model_->whiten(b) : b;
|
||||
this->transposeMultiplyAdd(-1.0, b_sigma, g); // g -= A'*b/sigma^2
|
||||
if (model_) model_->whitenInPlace(b);
|
||||
this->transposeMultiplyAdd(-1.0, b, g); // g -= A'*b/sigma^2
|
||||
return g;
|
||||
}
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ void GaussianFactorGraphSystem::multiply(const Vector &x, Vector& AtAx) const {
|
|||
VectorValues vvX = buildVectorValues(x, keyInfo_);
|
||||
|
||||
// VectorValues form of A'Ax for multiplyHessianAdd
|
||||
VectorValues vvAtAx;
|
||||
VectorValues vvAtAx = keyInfo_.x0(); // crucial for performance
|
||||
|
||||
// vvAtAx += 1.0 * A'Ax for each factor
|
||||
gfg_.multiplyHessianAdd(1.0, vvX, vvAtAx);
|
||||
|
@ -132,14 +132,14 @@ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering,
|
|||
|
||||
DenseIndex offset = 0;
|
||||
for (size_t i = 0; i < ordering.size(); ++i) {
|
||||
const Key &key = ordering[i];
|
||||
const Key key = ordering[i];
|
||||
map<Key, size_t>::const_iterator it = dimensions.find(key);
|
||||
if (it == dimensions.end()) {
|
||||
throw invalid_argument(
|
||||
"buildVectorValues: inconsistent ordering and dimensions");
|
||||
}
|
||||
const size_t dim = it->second;
|
||||
result.insert(key, v.segment(offset, dim));
|
||||
result.emplace(key, v.segment(offset, dim));
|
||||
offset += dim;
|
||||
}
|
||||
|
||||
|
@ -150,8 +150,7 @@ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering,
|
|||
VectorValues buildVectorValues(const Vector &v, const KeyInfo &keyInfo) {
|
||||
VectorValues result;
|
||||
for ( const KeyInfo::value_type &item: keyInfo ) {
|
||||
result.insert(item.first,
|
||||
v.segment(item.second.colstart(), item.second.dim()));
|
||||
result.emplace(item.first, v.segment(item.second.start, item.second.dim));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -70,21 +70,20 @@ public:
|
|||
Preconditioner() {}
|
||||
virtual ~Preconditioner() {}
|
||||
|
||||
/* Computation Interfaces */
|
||||
/*
|
||||
* Abstract interface for raw vectors. VectorValues is a speed bottleneck
|
||||
* and Yong-Dian has profiled preconditioners (outside GTSAM) with the the
|
||||
* three methods below. In GTSAM, unfortunately, we are still using the
|
||||
* VectorValues methods called in iterative-inl.h
|
||||
*/
|
||||
|
||||
/* implement x = L^{-1} y */
|
||||
/// implement x = L^{-1} y
|
||||
virtual void solve(const Vector& y, Vector &x) const = 0;
|
||||
// virtual void solve(const VectorValues& y, VectorValues &x) const = 0;
|
||||
|
||||
/* implement x = L^{-T} y */
|
||||
/// implement x = L^{-T} y
|
||||
virtual void transposeSolve(const Vector& y, Vector& x) const = 0;
|
||||
// virtual void transposeSolve(const VectorValues& y, VectorValues &x) const = 0;
|
||||
|
||||
// /* implement x = L^{-1} L^{-T} y */
|
||||
// virtual void fullSolve(const Vector& y, Vector &x) const = 0;
|
||||
// virtual void fullSolve(const VectorValues& y, VectorValues &x) const = 0;
|
||||
|
||||
/* build/factorize the preconditioner */
|
||||
/// build/factorize the preconditioner
|
||||
virtual void build(
|
||||
const GaussianFactorGraph &gfg,
|
||||
const KeyInfo &info,
|
||||
|
@ -113,14 +112,7 @@ public:
|
|||
|
||||
/* Computation Interfaces for raw vector */
|
||||
virtual void solve(const Vector& y, Vector &x) const { x = y; }
|
||||
// virtual void solve(const VectorValues& y, VectorValues& x) const { x = y; }
|
||||
|
||||
virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; }
|
||||
// virtual void transposeSolve(const VectorValues& y, VectorValues& x) const { x = y; }
|
||||
|
||||
// virtual void fullSolve(const Vector& y, Vector &x) const { x = y; }
|
||||
// virtual void fullSolve(const VectorValues& y, VectorValues& x) const { x = y; }
|
||||
|
||||
virtual void build(
|
||||
const GaussianFactorGraph &gfg,
|
||||
const KeyInfo &info,
|
||||
|
@ -145,8 +137,6 @@ public:
|
|||
/* Computation Interfaces for raw vector */
|
||||
virtual void solve(const Vector& y, Vector &x) const;
|
||||
virtual void transposeSolve(const Vector& y, Vector& x) const ;
|
||||
// virtual void fullSolve(const Vector& y, Vector &x) const ;
|
||||
|
||||
virtual void build(
|
||||
const GaussianFactorGraph &gfg,
|
||||
const KeyInfo &info,
|
||||
|
|
|
@ -0,0 +1,545 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SubgraphBuilder.cpp
|
||||
* @date Dec 31, 2009
|
||||
* @author Frank Dellaert, Yong-Dian Jian
|
||||
*/
|
||||
|
||||
#include <gtsam/base/DSFVector.h>
|
||||
#include <gtsam/base/FastMap.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/inference/VariableIndex.h>
|
||||
#include <gtsam/linear/Errors.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/SubgraphBuilder.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/archive/text_iarchive.hpp>
|
||||
#include <boost/archive/text_oarchive.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <numeric> // accumulate
|
||||
#include <queue>
|
||||
#include <set>
|
||||
#include <stdexcept>
|
||||
#include <vector>
|
||||
|
||||
using std::cout;
|
||||
using std::endl;
|
||||
using std::ostream;
|
||||
using std::vector;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/*****************************************************************************/
|
||||
/* sort the container and return permutation index with default comparator */
|
||||
template <typename Container>
|
||||
static vector<size_t> sort_idx(const Container &src) {
|
||||
typedef typename Container::value_type T;
|
||||
const size_t n = src.size();
|
||||
vector<std::pair<size_t, T> > tmp;
|
||||
tmp.reserve(n);
|
||||
for (size_t i = 0; i < n; i++) tmp.emplace_back(i, src[i]);
|
||||
|
||||
/* sort */
|
||||
std::stable_sort(tmp.begin(), tmp.end());
|
||||
|
||||
/* copy back */
|
||||
vector<size_t> idx;
|
||||
idx.reserve(n);
|
||||
for (size_t i = 0; i < n; i++) {
|
||||
idx.push_back(tmp[i].first);
|
||||
}
|
||||
return idx;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
static vector<size_t> iidSampler(const vector<double> &weight, const size_t n) {
|
||||
/* compute the sum of the weights */
|
||||
const double sum = std::accumulate(weight.begin(), weight.end(), 0.0);
|
||||
if (sum == 0.0) {
|
||||
throw std::runtime_error("Weight vector has no non-zero weights");
|
||||
}
|
||||
|
||||
/* make a normalized and accumulated version of the weight vector */
|
||||
const size_t m = weight.size();
|
||||
vector<double> cdf;
|
||||
cdf.reserve(m);
|
||||
for (size_t i = 0; i < m; ++i) {
|
||||
cdf.push_back(weight[i] / sum);
|
||||
}
|
||||
|
||||
vector<double> acc(m);
|
||||
std::partial_sum(cdf.begin(), cdf.end(), acc.begin());
|
||||
|
||||
/* iid sample n times */
|
||||
vector<size_t> samples;
|
||||
samples.reserve(n);
|
||||
const double denominator = (double)RAND_MAX;
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
const double value = rand() / denominator;
|
||||
/* binary search the interval containing "value" */
|
||||
const auto it = std::lower_bound(acc.begin(), acc.end(), value);
|
||||
const size_t index = it - acc.begin();
|
||||
samples.push_back(index);
|
||||
}
|
||||
return samples;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
static vector<size_t> UniqueSampler(const vector<double> &weight,
|
||||
const size_t n) {
|
||||
const size_t m = weight.size();
|
||||
if (n > m) throw std::invalid_argument("UniqueSampler: invalid input size");
|
||||
|
||||
vector<size_t> samples;
|
||||
|
||||
size_t count = 0;
|
||||
vector<bool> touched(m, false);
|
||||
while (count < n) {
|
||||
vector<size_t> localIndices;
|
||||
localIndices.reserve(n - count);
|
||||
vector<double> localWeights;
|
||||
localWeights.reserve(n - count);
|
||||
|
||||
/* collect data */
|
||||
for (size_t i = 0; i < m; ++i) {
|
||||
if (!touched[i]) {
|
||||
localIndices.push_back(i);
|
||||
localWeights.push_back(weight[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/* sampling and cache results */
|
||||
vector<size_t> samples = iidSampler(localWeights, n - count);
|
||||
for (const size_t &index : samples) {
|
||||
if (touched[index] == false) {
|
||||
touched[index] = true;
|
||||
samples.push_back(index);
|
||||
if (++count >= n) break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return samples;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
Subgraph::Subgraph(const vector<size_t> &indices) {
|
||||
edges_.reserve(indices.size());
|
||||
for (const size_t &index : indices) {
|
||||
const Edge e {index,1.0};
|
||||
edges_.push_back(e);
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
vector<size_t> Subgraph::edgeIndices() const {
|
||||
vector<size_t> eid;
|
||||
eid.reserve(size());
|
||||
for (const Edge &edge : edges_) {
|
||||
eid.push_back(edge.index);
|
||||
}
|
||||
return eid;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
void Subgraph::save(const std::string &fn) const {
|
||||
std::ofstream os(fn.c_str());
|
||||
boost::archive::text_oarchive oa(os);
|
||||
oa << *this;
|
||||
os.close();
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
Subgraph Subgraph::load(const std::string &fn) {
|
||||
std::ifstream is(fn.c_str());
|
||||
boost::archive::text_iarchive ia(is);
|
||||
Subgraph subgraph;
|
||||
ia >> subgraph;
|
||||
is.close();
|
||||
return subgraph;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
ostream &operator<<(ostream &os, const Subgraph::Edge &edge) {
|
||||
if (edge.weight != 1.0)
|
||||
os << edge.index << "(" << std::setprecision(2) << edge.weight << ")";
|
||||
else
|
||||
os << edge.index;
|
||||
return os;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
ostream &operator<<(ostream &os, const Subgraph &subgraph) {
|
||||
os << "Subgraph" << endl;
|
||||
for (const auto &e : subgraph.edges()) {
|
||||
os << e << ", ";
|
||||
}
|
||||
return os;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
void SubgraphBuilderParameters::print() const { print(cout); }
|
||||
|
||||
/***************************************************************************************/
|
||||
void SubgraphBuilderParameters::print(ostream &os) const {
|
||||
os << "SubgraphBuilderParameters" << endl
|
||||
<< "skeleton: " << skeletonTranslator(skeletonType) << endl
|
||||
<< "skeleton weight: " << skeletonWeightTranslator(skeletonWeight)
|
||||
<< endl
|
||||
<< "augmentation weight: "
|
||||
<< augmentationWeightTranslator(augmentationWeight) << endl;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
ostream &operator<<(ostream &os, const SubgraphBuilderParameters &p) {
|
||||
p.print(os);
|
||||
return os;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
SubgraphBuilderParameters::Skeleton
|
||||
SubgraphBuilderParameters::skeletonTranslator(const std::string &src) {
|
||||
std::string s = src;
|
||||
boost::algorithm::to_upper(s);
|
||||
if (s == "NATURALCHAIN")
|
||||
return NATURALCHAIN;
|
||||
else if (s == "BFS")
|
||||
return BFS;
|
||||
else if (s == "KRUSKAL")
|
||||
return KRUSKAL;
|
||||
throw std::invalid_argument(
|
||||
"SubgraphBuilderParameters::skeletonTranslator undefined string " + s);
|
||||
return KRUSKAL;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton s) {
|
||||
if (s == NATURALCHAIN)
|
||||
return "NATURALCHAIN";
|
||||
else if (s == BFS)
|
||||
return "BFS";
|
||||
else if (s == KRUSKAL)
|
||||
return "KRUSKAL";
|
||||
else
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilderParameters::SkeletonWeight
|
||||
SubgraphBuilderParameters::skeletonWeightTranslator(const std::string &src) {
|
||||
std::string s = src;
|
||||
boost::algorithm::to_upper(s);
|
||||
if (s == "EQUAL")
|
||||
return EQUAL;
|
||||
else if (s == "RHS")
|
||||
return RHS_2NORM;
|
||||
else if (s == "LHS")
|
||||
return LHS_FNORM;
|
||||
else if (s == "RANDOM")
|
||||
return RANDOM;
|
||||
throw std::invalid_argument(
|
||||
"SubgraphBuilderParameters::skeletonWeightTranslator undefined string " +
|
||||
s);
|
||||
return EQUAL;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::skeletonWeightTranslator(
|
||||
SkeletonWeight w) {
|
||||
if (w == EQUAL)
|
||||
return "EQUAL";
|
||||
else if (w == RHS_2NORM)
|
||||
return "RHS";
|
||||
else if (w == LHS_FNORM)
|
||||
return "LHS";
|
||||
else if (w == RANDOM)
|
||||
return "RANDOM";
|
||||
else
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilderParameters::AugmentationWeight
|
||||
SubgraphBuilderParameters::augmentationWeightTranslator(
|
||||
const std::string &src) {
|
||||
std::string s = src;
|
||||
boost::algorithm::to_upper(s);
|
||||
if (s == "SKELETON") return SKELETON;
|
||||
// else if (s == "STRETCH") return STRETCH;
|
||||
// else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH;
|
||||
throw std::invalid_argument(
|
||||
"SubgraphBuilder::Parameters::augmentationWeightTranslator undefined "
|
||||
"string " +
|
||||
s);
|
||||
return SKELETON;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::augmentationWeightTranslator(
|
||||
AugmentationWeight w) {
|
||||
if (w == SKELETON) return "SKELETON";
|
||||
// else if ( w == STRETCH ) return "STRETCH";
|
||||
// else if ( w == GENERALIZED_STRETCH ) return "GENERALIZED_STRETCH";
|
||||
else
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg,
|
||||
const FastMap<Key, size_t> &ordering,
|
||||
const vector<double> &weights) const {
|
||||
const SubgraphBuilderParameters &p = parameters_;
|
||||
switch (p.skeletonType) {
|
||||
case SubgraphBuilderParameters::NATURALCHAIN:
|
||||
return natural_chain(gfg);
|
||||
break;
|
||||
case SubgraphBuilderParameters::BFS:
|
||||
return bfs(gfg);
|
||||
break;
|
||||
case SubgraphBuilderParameters::KRUSKAL:
|
||||
return kruskal(gfg, ordering, weights);
|
||||
break;
|
||||
default:
|
||||
std::cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl;
|
||||
break;
|
||||
}
|
||||
return vector<size_t>();
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const {
|
||||
vector<size_t> unaryFactorIndices;
|
||||
size_t index = 0;
|
||||
for (const auto &factor : gfg) {
|
||||
if (factor->size() == 1) {
|
||||
unaryFactorIndices.push_back(index);
|
||||
}
|
||||
index++;
|
||||
}
|
||||
return unaryFactorIndices;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::natural_chain(
|
||||
const GaussianFactorGraph &gfg) const {
|
||||
vector<size_t> chainFactorIndices;
|
||||
size_t index = 0;
|
||||
for (const GaussianFactor::shared_ptr &gf : gfg) {
|
||||
if (gf->size() == 2) {
|
||||
const Key k0 = gf->keys()[0], k1 = gf->keys()[1];
|
||||
if ((k1 - k0) == 1 || (k0 - k1) == 1) chainFactorIndices.push_back(index);
|
||||
}
|
||||
index++;
|
||||
}
|
||||
return chainFactorIndices;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
|
||||
const VariableIndex variableIndex(gfg);
|
||||
/* start from the first key of the first factor */
|
||||
Key seed = gfg[0]->keys()[0];
|
||||
|
||||
const size_t n = variableIndex.size();
|
||||
|
||||
/* each vertex has self as the predecessor */
|
||||
vector<size_t> bfsFactorIndices;
|
||||
bfsFactorIndices.reserve(n - 1);
|
||||
|
||||
/* Initialize */
|
||||
std::queue<size_t> q;
|
||||
q.push(seed);
|
||||
|
||||
std::set<size_t> flags;
|
||||
flags.insert(seed);
|
||||
|
||||
/* traversal */
|
||||
while (!q.empty()) {
|
||||
const size_t head = q.front();
|
||||
q.pop();
|
||||
for (const size_t index : variableIndex[head]) {
|
||||
const GaussianFactor &gf = *gfg[index];
|
||||
for (const size_t key : gf.keys()) {
|
||||
if (flags.count(key) == 0) {
|
||||
q.push(key);
|
||||
flags.insert(key);
|
||||
bfsFactorIndices.push_back(index);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return bfsFactorIndices;
|
||||
}
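
Detached from the GTSAM types, the selection rule above is plain breadth-first search: expand vertices level by level and keep the edge that first discovers each vertex, so a connected graph contributes exactly one tree edge per newly reached vertex. A self-contained sketch with invented toy types (not the GTSAM code path):

#include <cstddef>
#include <queue>
#include <set>
#include <utility>
#include <vector>

// adjacency[v] holds (neighbor, edge id) pairs; returns the ids of the edges
// that first reach each vertex, i.e. a BFS spanning tree rooted at seed.
std::vector<size_t> bfsTree(
    const std::vector<std::vector<std::pair<size_t, size_t> > >& adjacency,
    size_t seed) {
  std::vector<size_t> treeEdges;
  std::queue<size_t> frontier;
  std::set<size_t> visited;
  visited.insert(seed);
  frontier.push(seed);
  while (!frontier.empty()) {
    const size_t head = frontier.front();
    frontier.pop();
    for (const std::pair<size_t, size_t>& edge : adjacency[head]) {
      if (visited.insert(edge.first).second) {  // neighbor seen for the first time
        frontier.push(edge.first);
        treeEdges.push_back(edge.second);
      }
    }
  }
  return treeEdges;
}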
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
|
||||
const FastMap<Key, size_t> &ordering,
|
||||
const vector<double> &weights) const {
|
||||
const VariableIndex variableIndex(gfg);
|
||||
const size_t n = variableIndex.size();
|
||||
const vector<size_t> sortedIndices = sort_idx(weights);
|
||||
|
||||
/* initialize buffer */
|
||||
vector<size_t> treeIndices;
|
||||
treeIndices.reserve(n - 1);
|
||||
|
||||
// Disjoint-set forest over the n variables; an edge is accepted only if it
// joins two different components (Kruskal's algorithm)
DSFVector dsf(n);
|
||||
|
||||
size_t count = 0;
|
||||
double sum = 0.0;
|
||||
for (const size_t index : sortedIndices) {
|
||||
const GaussianFactor &gf = *gfg[index];
|
||||
const auto keys = gf.keys();
|
||||
if (keys.size() != 2) continue;
|
||||
const size_t u = ordering.find(keys[0])->second,
|
||||
v = ordering.find(keys[1])->second;
|
||||
if (dsf.find(u) != dsf.find(v)) {
|
||||
dsf.merge(u, v);
|
||||
treeIndices.push_back(index);
|
||||
sum += weights[index];
|
||||
if (++count == n - 1) break;
|
||||
}
|
||||
}
|
||||
return treeIndices;
|
||||
}
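
The loop above is Kruskal's algorithm: walk the edges in sorted weight order and accept an edge only if its two endpoints still lie in different components, stopping once n-1 edges are in the tree. A minimal standalone sketch with a hand-rolled union-find (all names are invented; the GTSAM code uses DSFVector for the same purpose):

#include <cstddef>
#include <numeric>
#include <vector>

struct DisjointSets {  // minimal union-find with path halving
  std::vector<size_t> parent;
  explicit DisjointSets(size_t n) : parent(n) {
    std::iota(parent.begin(), parent.end(), 0);
  }
  size_t find(size_t x) {
    while (parent[x] != x) {
      parent[x] = parent[parent[x]];
      x = parent[x];
    }
    return x;
  }
  bool merge(size_t a, size_t b) {  // returns true if a and b were separate
    a = find(a);
    b = find(b);
    if (a == b) return false;
    parent[a] = b;
    return true;
  }
};

struct WeightedEdge { size_t u, v, id; };

// Edges must already be sorted in the desired weight order (numVertices >= 1).
std::vector<size_t> kruskalTree(size_t numVertices,
                                const std::vector<WeightedEdge>& sortedEdges) {
  DisjointSets components(numVertices);
  std::vector<size_t> tree;
  for (const WeightedEdge& e : sortedEdges) {
    if (components.merge(e.u, e.v)) {
      tree.push_back(e.id);
      if (tree.size() == numVertices - 1) break;  // spanning tree is complete
    }
  }
  return tree;
}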
|
||||
|
||||
/****************************************************************/
|
||||
vector<size_t> SubgraphBuilder::sample(const vector<double> &weights,
|
||||
const size_t t) const {
|
||||
return UniqueSampler(weights, t);
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
Subgraph SubgraphBuilder::operator()(
|
||||
const GaussianFactorGraph &gfg) const {
|
||||
const auto &p = parameters_;
|
||||
const auto inverse_ordering = Ordering::Natural(gfg);
|
||||
const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert();
|
||||
const size_t n = inverse_ordering.size(), m = gfg.size();
|
||||
|
||||
// Make sure the subgraph preconditioner does not include more than half of
|
||||
// the edges beyond the spanning tree, or we might as well solve the whole
|
||||
// thing.
|
||||
size_t numExtraEdges = n * p.augmentationFactor;
|
||||
const size_t numRemaining = m - (n - 1);
|
||||
numExtraEdges = std::min(numExtraEdges, numRemaining / 2);
|
||||
|
||||
// Calculate weights
|
||||
vector<double> weights = this->weights(gfg);
|
||||
|
||||
// Build spanning tree.
|
||||
const vector<size_t> tree = buildTree(gfg, forward_ordering, weights);
|
||||
if (tree.size() != n - 1) {
|
||||
throw std::runtime_error(
|
||||
"SubgraphBuilder::operator() failure: tree.size() != n-1");
|
||||
}
|
||||
|
||||
// Downweight the tree edges to zero.
|
||||
for (const size_t index : tree) {
|
||||
weights[index] = 0.0;
|
||||
}
|
||||
|
||||
/* decide how many edges to augment */
|
||||
vector<size_t> offTree = sample(weights, numExtraEdges);
|
||||
|
||||
vector<size_t> subgraph = unary(gfg);
|
||||
subgraph.insert(subgraph.end(), tree.begin(), tree.end());
|
||||
subgraph.insert(subgraph.end(), offTree.begin(), offTree.end());
|
||||
|
||||
return Subgraph(subgraph);
|
||||
}
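
A hedged usage sketch of the builder defined above; the parameter values are arbitrary, `gfg` is assumed to be an existing GaussianFactorGraph in scope, and <iostream> is assumed to be included:

gtsam::SubgraphBuilderParameters params;
params.skeletonType = gtsam::SubgraphBuilderParameters::KRUSKAL;
params.skeletonWeight = gtsam::SubgraphBuilderParameters::LHS_FNORM;
params.augmentationFactor = 0.0;  // keep only the unary factors and the spanning tree

const gtsam::SubgraphBuilder builder(params);
const gtsam::Subgraph subgraph = builder(gfg);  // unary + tree + sampled off-tree edges
std::cout << subgraph << std::endl;

With augmentationFactor left at its default of 1.0, up to n extra off-tree edges are sampled in proportion to their weights, capped at half of the remaining edges as enforced above.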
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilder::Weights SubgraphBuilder::weights(
|
||||
const GaussianFactorGraph &gfg) const {
|
||||
const size_t m = gfg.size();
|
||||
Weights weight;
|
||||
weight.reserve(m);
|
||||
|
||||
for (const GaussianFactor::shared_ptr &gf : gfg) {
|
||||
switch (parameters_.skeletonWeight) {
|
||||
case SubgraphBuilderParameters::EQUAL:
|
||||
weight.push_back(1.0);
|
||||
break;
|
||||
case SubgraphBuilderParameters::RHS_2NORM: {
|
||||
if (JacobianFactor::shared_ptr jf =
|
||||
boost::dynamic_pointer_cast<JacobianFactor>(gf)) {
|
||||
weight.push_back(jf->getb().norm());
|
||||
} else if (HessianFactor::shared_ptr hf =
|
||||
boost::dynamic_pointer_cast<HessianFactor>(gf)) {
|
||||
weight.push_back(hf->linearTerm().norm());
|
||||
}
|
||||
} break;
|
||||
case SubgraphBuilderParameters::LHS_FNORM: {
|
||||
if (JacobianFactor::shared_ptr jf =
|
||||
boost::dynamic_pointer_cast<JacobianFactor>(gf)) {
|
||||
weight.push_back(std::sqrt(jf->getA().squaredNorm()));
|
||||
} else if (HessianFactor::shared_ptr hf =
|
||||
boost::dynamic_pointer_cast<HessianFactor>(gf)) {
|
||||
weight.push_back(std::sqrt(hf->information().squaredNorm()));
|
||||
}
|
||||
} break;
|
||||
|
||||
case SubgraphBuilderParameters::RANDOM:
|
||||
weight.push_back(std::rand() % 100 + 1.0);
|
||||
break;
|
||||
|
||||
default:
|
||||
throw std::invalid_argument(
|
||||
"SubgraphBuilder::weights: undefined weight scheme ");
|
||||
break;
|
||||
}
|
||||
}
|
||||
return weight;
|
||||
}
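
In symbols, writing A_i for the Jacobian block of factor i and b_i for its right-hand side (for a HessianFactor, the information matrix and the linear term take their places), the two matrix-based schemes above assign

w_i^{\mathrm{RHS}} = \lVert b_i \rVert_2, \qquad
w_i^{\mathrm{LHS}} = \lVert A_i \rVert_F = \sqrt{\sum_{j,k} A_{i,jk}^2},

while EQUAL assigns w_i = 1 and RANDOM draws an integer weight in [1, 100].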
|
||||
|
||||
/*****************************************************************************/
|
||||
GaussianFactorGraph::shared_ptr buildFactorSubgraph(
|
||||
const GaussianFactorGraph &gfg, const Subgraph &subgraph,
|
||||
const bool clone) {
|
||||
auto subgraphFactors = boost::make_shared<GaussianFactorGraph>();
|
||||
subgraphFactors->reserve(subgraph.size());
|
||||
for (const auto &e : subgraph) {
|
||||
const auto factor = gfg[e.index];
|
||||
subgraphFactors->push_back(clone ? factor->clone() : factor);
|
||||
}
|
||||
return subgraphFactors;
|
||||
}
|
||||
|
||||
/**************************************************************************************************/
|
||||
std::pair<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr> //
|
||||
splitFactorGraph(const GaussianFactorGraph &factorGraph, const Subgraph &subgraph) {
|
||||
|
||||
// Get the subgraph factors by calling the cheaper (non-cloning) method
|
||||
auto subgraphFactors = buildFactorSubgraph(factorGraph, subgraph, false);
|
||||
|
||||
// Copy all factors, then remove the subgraph factors (leaving null slots behind)
|
||||
auto remaining = boost::make_shared<GaussianFactorGraph>(factorGraph);
|
||||
|
||||
for (const auto &e : subgraph) {
|
||||
remaining->remove(e.index);
|
||||
}
|
||||
|
||||
return std::make_pair(subgraphFactors, remaining);
|
||||
}
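
A hedged sketch of how the split above is typically consumed; it reuses `gfg` and `subgraph` from the earlier sketch, and note that size() counts factor slots, so the remaining graph keeps null slots where subgraph factors were removed:

const auto split = gtsam::splitFactorGraph(gfg, subgraph);
const gtsam::GaussianFactorGraph::shared_ptr subgraphFactors = split.first;    // shared, not cloned
const gtsam::GaussianFactorGraph::shared_ptr remainingFactors = split.second;  // copy with null slots
std::cout << "subgraph slots: " << subgraphFactors->size()
          << ", remaining slots: " << remainingFactors->size() << std::endl;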
|
||||
|
||||
/*****************************************************************************/
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,182 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SubgraphBuilder.h
|
||||
* @date Dec 31, 2009
|
||||
* @author Frank Dellaert, Yong-Dian Jian
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/FastMap.h>
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
class access;
|
||||
} /* namespace serialization */
|
||||
} /* namespace boost */
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
// Forward declarations
|
||||
class GaussianFactorGraph;
|
||||
struct PreconditionerParameters;
|
||||
|
||||
/**************************************************************************/
|
||||
class GTSAM_EXPORT Subgraph {
|
||||
public:
|
||||
struct GTSAM_EXPORT Edge {
|
||||
size_t index; /* edge id */
|
||||
double weight; /* edge weight */
|
||||
inline bool isUnitWeight() const { return (weight == 1.0); }
|
||||
friend std::ostream &operator<<(std::ostream &os, const Edge &edge);
|
||||
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_NVP(index);
|
||||
ar &BOOST_SERIALIZATION_NVP(weight);
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::vector<Edge> Edges;
|
||||
typedef std::vector<size_t> EdgeIndices;
|
||||
typedef Edges::iterator iterator;
|
||||
typedef Edges::const_iterator const_iterator;
|
||||
|
||||
protected:
|
||||
Edges edges_; /* index to the factors */
|
||||
|
||||
public:
|
||||
Subgraph() {}
|
||||
Subgraph(const Subgraph &subgraph) : edges_(subgraph.edges()) {}
|
||||
Subgraph(const Edges &edges) : edges_(edges) {}
|
||||
Subgraph(const std::vector<size_t> &indices);
|
||||
|
||||
inline const Edges &edges() const { return edges_; }
|
||||
inline size_t size() const { return edges_.size(); }
|
||||
EdgeIndices edgeIndices() const;
|
||||
|
||||
iterator begin() { return edges_.begin(); }
|
||||
const_iterator begin() const { return edges_.begin(); }
|
||||
iterator end() { return edges_.end(); }
|
||||
const_iterator end() const { return edges_.end(); }
|
||||
|
||||
void save(const std::string &fn) const;
|
||||
static Subgraph load(const std::string &fn);
|
||||
friend std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph);
|
||||
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_NVP(edges_);
|
||||
}
|
||||
};
|
||||
|
||||
/****************************************************************************/
|
||||
struct GTSAM_EXPORT SubgraphBuilderParameters {
|
||||
typedef boost::shared_ptr<SubgraphBuilderParameters> shared_ptr;
|
||||
|
||||
enum Skeleton {
|
||||
/* augmented tree */
|
||||
NATURALCHAIN = 0, /* natural ordering of the graph */
|
||||
BFS, /* breadth-first search tree */
|
||||
KRUSKAL, /* maximum weighted spanning tree */
|
||||
} skeletonType;
|
||||
|
||||
enum SkeletonWeight { /* how to weigh the graph edges */
|
||||
EQUAL = 0, /* every block edge has equal weight */
|
||||
RHS_2NORM, /* use the 2-norm of the rhs */
|
||||
LHS_FNORM, /* use the frobenius norm of the lhs */
|
||||
RANDOM, /* bounded random edge weight */
|
||||
} skeletonWeight;
|
||||
|
||||
enum AugmentationWeight { /* how to weigh the graph edges */
|
||||
SKELETON = 0, /* use the same weights in building
|
||||
the skeleton */
|
||||
// STRETCH,            /* stretch in the laplacian sense */
// GENERALIZED_STRETCH /* the generalized stretch defined in jian2013iros */
|
||||
} augmentationWeight;
|
||||
|
||||
/// factor multiplied with n, yields number of extra edges.
|
||||
double augmentationFactor;
|
||||
|
||||
SubgraphBuilderParameters()
|
||||
: skeletonType(KRUSKAL),
|
||||
skeletonWeight(RANDOM),
|
||||
augmentationWeight(SKELETON),
|
||||
augmentationFactor(1.0) {}
|
||||
virtual ~SubgraphBuilderParameters() {}
|
||||
|
||||
/* for serialization */
|
||||
void print() const;
|
||||
virtual void print(std::ostream &os) const;
|
||||
friend std::ostream &operator<<(std::ostream &os,
|
||||
const PreconditionerParameters &p);
|
||||
|
||||
static Skeleton skeletonTranslator(const std::string &s);
|
||||
static std::string skeletonTranslator(Skeleton s);
|
||||
static SkeletonWeight skeletonWeightTranslator(const std::string &s);
|
||||
static std::string skeletonWeightTranslator(SkeletonWeight w);
|
||||
static AugmentationWeight augmentationWeightTranslator(const std::string &s);
|
||||
static std::string augmentationWeightTranslator(AugmentationWeight w);
|
||||
};
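
The string translators above make this struct easy to drive from textual options; a hedged sketch (the option strings are invented for the example, and the translators upper-case their input before matching, so lower case is fine):

gtsam::SubgraphBuilderParameters params;
params.skeletonType = gtsam::SubgraphBuilderParameters::skeletonTranslator("kruskal");
params.skeletonWeight = gtsam::SubgraphBuilderParameters::skeletonWeightTranslator("lhs");
params.augmentationWeight =
    gtsam::SubgraphBuilderParameters::augmentationWeightTranslator("skeleton");
params.print();  // echoes the chosen configuration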
|
||||
|
||||
/*****************************************************************************/
|
||||
class GTSAM_EXPORT SubgraphBuilder {
|
||||
public:
|
||||
typedef SubgraphBuilder Base;
|
||||
typedef std::vector<double> Weights;
|
||||
|
||||
SubgraphBuilder(
|
||||
const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
|
||||
: parameters_(p) {}
|
||||
virtual ~SubgraphBuilder() {}
|
||||
virtual Subgraph operator()(const GaussianFactorGraph &jfg) const;
|
||||
|
||||
private:
|
||||
std::vector<size_t> buildTree(const GaussianFactorGraph &gfg,
|
||||
const FastMap<Key, size_t> &ordering,
|
||||
const std::vector<double> &weights) const;
|
||||
std::vector<size_t> unary(const GaussianFactorGraph &gfg) const;
|
||||
std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const;
|
||||
std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const;
|
||||
std::vector<size_t> kruskal(const GaussianFactorGraph &gfg,
|
||||
const FastMap<Key, size_t> &ordering,
|
||||
const std::vector<double> &weights) const;
|
||||
std::vector<size_t> sample(const std::vector<double> &weights,
|
||||
const size_t t) const;
|
||||
Weights weights(const GaussianFactorGraph &gfg) const;
|
||||
SubgraphBuilderParameters parameters_;
|
||||
};
|
||||
|
||||
/** Select the factors in a factor graph according to the subgraph. */
|
||||
boost::shared_ptr<GaussianFactorGraph> buildFactorSubgraph(
|
||||
const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone);
|
||||
|
||||
/** Split the graph into a subgraph and the remaining edges.
|
||||
* Note that the remaining factorgraph has null factors. */
|
||||
std::pair<boost::shared_ptr<GaussianFactorGraph>, boost::shared_ptr<GaussianFactorGraph> >
|
||||
splitFactorGraph(const GaussianFactorGraph &factorGraph, const Subgraph &subgraph);
|
||||
|
||||
} // namespace gtsam
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -12,451 +12,85 @@
|
|||
/**
|
||||
* @file SubgraphPreconditioner.cpp
|
||||
* @date Dec 31, 2009
|
||||
* @author: Frank Dellaert
|
||||
* @author Frank Dellaert, Yong-Dian Jian
|
||||
*/
|
||||
|
||||
#include <gtsam/linear/SubgraphPreconditioner.h>
|
||||
#include <gtsam/linear/GaussianConditional.h>
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
|
||||
#include <gtsam/linear/SubgraphBuilder.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/GaussianBayesNet.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
#include <gtsam/linear/GaussianEliminationTree.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/inference/VariableIndex.h>
|
||||
#include <gtsam/base/DSFVector.h>
|
||||
#include <gtsam/base/FastMap.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/archive/text_iarchive.hpp>
|
||||
#include <boost/archive/text_oarchive.hpp>
|
||||
#include <boost/serialization/vector.hpp>
|
||||
#include <boost/range/adaptor/reversed.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <cstdlib>
|
||||
#include <fstream>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
#include <iterator>
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <numeric> // accumulate
|
||||
#include <queue>
|
||||
#include <set>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using std::cout;
|
||||
using std::endl;
|
||||
using std::vector;
|
||||
using std::ostream;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFactorGraph &gfg) {
|
||||
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph());
|
||||
for(const GaussianFactor::shared_ptr &gf: gfg) {
|
||||
JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf);
|
||||
if( !jf ) {
|
||||
jf = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
|
||||
}
|
||||
result->push_back(jf);
|
||||
static Vector getSubvector(const Vector &src, const KeyInfo &keyInfo,
|
||||
const KeyVector &keys) {
|
||||
/* a cache of starting index and dim */
|
||||
vector<std::pair<size_t, size_t> > cache;
|
||||
cache.reserve(3);
|
||||
|
||||
/* figure out dimension by traversing the keys */
|
||||
size_t dim = 0;
|
||||
for (const Key &key : keys) {
|
||||
const KeyInfoEntry &entry = keyInfo.find(key)->second;
|
||||
cache.emplace_back(entry.start, entry.dim);
|
||||
dim += entry.dim;
|
||||
}
|
||||
|
||||
/* use the cache to fill the result */
|
||||
Vector result(dim);
|
||||
size_t index = 0;
|
||||
for (const auto &p : cache) {
|
||||
result.segment(index, p.second) = src.segment(p.first, p.second);
|
||||
index += p.second;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
static std::vector<size_t> iidSampler(const vector<double> &weight, const size_t n) {
|
||||
|
||||
/* compute the sum of the weights */
|
||||
const double sum = std::accumulate(weight.begin(), weight.end(), 0.0);
|
||||
|
||||
/* make a normalized and accumulated version of the weight vector */
|
||||
const size_t m = weight.size();
|
||||
vector<double> w; w.reserve(m);
|
||||
for ( size_t i = 0 ; i < m ; ++i ) {
|
||||
w.push_back(weight[i]/sum);
|
||||
static void setSubvector(const Vector &src, const KeyInfo &keyInfo,
|
||||
const KeyVector &keys, Vector &dst) {
|
||||
size_t index = 0;
|
||||
for (const Key &key : keys) {
|
||||
const KeyInfoEntry &entry = keyInfo.find(key)->second;
|
||||
const size_t keyDim = entry.dim;
|
||||
dst.segment(entry.start, keyDim) = src.segment(index, keyDim);
|
||||
index += keyDim;
|
||||
}
|
||||
}
|
||||
|
||||
vector<double> acc(m);
|
||||
std::partial_sum(w.begin(),w.end(),acc.begin());
|
||||
|
||||
/* iid sample n times */
|
||||
vector<size_t> result; result.reserve(n);
|
||||
const double denominator = (double)RAND_MAX;
|
||||
for ( size_t i = 0 ; i < n ; ++i ) {
|
||||
const double value = rand() / denominator;
|
||||
/* binary search the interval containing "value" */
|
||||
vector<double>::iterator it = std::lower_bound(acc.begin(), acc.end(), value);
|
||||
size_t idx = it - acc.begin();
|
||||
result.push_back(idx);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
// Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with
|
||||
// Cholesky)
|
||||
static GaussianFactorGraph::shared_ptr convertToJacobianFactors(
|
||||
const GaussianFactorGraph &gfg) {
|
||||
auto result = boost::make_shared<GaussianFactorGraph>();
|
||||
for (const auto &factor : gfg)
|
||||
if (factor) {
|
||||
auto jf = boost::dynamic_pointer_cast<JacobianFactor>(factor);
|
||||
if (!jf) {
|
||||
jf = boost::make_shared<JacobianFactor>(*factor);
|
||||
}
|
||||
result->push_back(jf);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
vector<size_t> uniqueSampler(const vector<double> &weight, const size_t n) {
|
||||
|
||||
const size_t m = weight.size();
|
||||
if ( n > m ) throw std::invalid_argument("uniqueSampler: invalid input size");
|
||||
|
||||
vector<size_t> result;
|
||||
|
||||
size_t count = 0;
|
||||
std::vector<bool> touched(m, false);
|
||||
while ( count < n ) {
|
||||
std::vector<size_t> localIndices; localIndices.reserve(n-count);
|
||||
std::vector<double> localWeights; localWeights.reserve(n-count);
|
||||
|
||||
/* collect data */
|
||||
for ( size_t i = 0 ; i < m ; ++i ) {
|
||||
if ( !touched[i] ) {
|
||||
localIndices.push_back(i);
|
||||
localWeights.push_back(weight[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/* sampling and cache results */
|
||||
vector<size_t> samples = iidSampler(localWeights, n-count);
|
||||
for ( const size_t &id: samples ) {
|
||||
if ( touched[id] == false ) {
|
||||
touched[id] = true ;
|
||||
result.push_back(id);
|
||||
if ( ++count >= n ) break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
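
uniqueSampler above keeps re-drawing, in proportion to the weights, from the entries that have not been picked yet until n distinct indices are collected. A hedged sketch of the same idea using <random> instead of rand(); the function name is invented and this is not the GTSAM implementation:

#include <cstddef>
#include <random>
#include <stdexcept>
#include <vector>

// Draw n distinct indices, each picked with probability proportional to its weight.
std::vector<size_t> weightedSampleWithoutReplacement(std::vector<double> weights,
                                                     size_t n, std::mt19937& rng) {
  size_t positive = 0;
  for (size_t i = 0; i < weights.size(); ++i)
    if (weights[i] > 0.0) ++positive;
  if (n > positive)
    throw std::invalid_argument("weightedSampleWithoutReplacement: n too large");

  std::vector<size_t> picked;
  picked.reserve(n);
  while (picked.size() < n) {
    std::discrete_distribution<size_t> dist(weights.begin(), weights.end());
    const size_t idx = dist(rng);  // zero-weight entries are never drawn
    picked.push_back(idx);
    weights[idx] = 0.0;            // remove the index from all future draws
  }
  return picked;
}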
|
||||
|
||||
/****************************************************************************/
|
||||
Subgraph::Subgraph(const std::vector<size_t> &indices) {
|
||||
edges_.reserve(indices.size());
|
||||
for ( const size_t &idx: indices ) {
|
||||
edges_.push_back(SubgraphEdge(idx, 1.0));
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
std::vector<size_t> Subgraph::edgeIndices() const {
|
||||
std::vector<size_t> eid; eid.reserve(size());
|
||||
for ( const SubgraphEdge &edge: edges_ ) {
|
||||
eid.push_back(edge.index_);
|
||||
}
|
||||
return eid;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
void Subgraph::save(const std::string &fn) const {
|
||||
std::ofstream os(fn.c_str());
|
||||
boost::archive::text_oarchive oa(os);
|
||||
oa << *this;
|
||||
os.close();
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
Subgraph::shared_ptr Subgraph::load(const std::string &fn) {
|
||||
std::ifstream is(fn.c_str());
|
||||
boost::archive::text_iarchive ia(is);
|
||||
Subgraph::shared_ptr subgraph(new Subgraph());
|
||||
ia >> *subgraph;
|
||||
is.close();
|
||||
return subgraph;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge) {
|
||||
if ( edge.weight() != 1.0 )
|
||||
os << edge.index() << "(" << std::setprecision(2) << edge.weight() << ")";
|
||||
else
|
||||
os << edge.index() ;
|
||||
return os;
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph) {
|
||||
os << "Subgraph" << endl;
|
||||
for ( const SubgraphEdge &e: subgraph.edges() ) {
|
||||
os << e << ", " ;
|
||||
}
|
||||
return os;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
void SubgraphBuilderParameters::print() const {
|
||||
print(cout);
|
||||
}
|
||||
|
||||
/***************************************************************************************/
|
||||
void SubgraphBuilderParameters::print(ostream &os) const {
|
||||
os << "SubgraphBuilderParameters" << endl
|
||||
<< "skeleton: " << skeletonTranslator(skeleton_) << endl
|
||||
<< "skeleton weight: " << skeletonWeightTranslator(skeletonWeight_) << endl
|
||||
<< "augmentation weight: " << augmentationWeightTranslator(augmentationWeight_) << endl
|
||||
;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
ostream& operator<<(ostream &os, const SubgraphBuilderParameters &p) {
|
||||
p.print(os);
|
||||
return os;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
SubgraphBuilderParameters::Skeleton SubgraphBuilderParameters::skeletonTranslator(const std::string &src){
|
||||
std::string s = src; boost::algorithm::to_upper(s);
|
||||
if (s == "NATURALCHAIN") return NATURALCHAIN;
|
||||
else if (s == "BFS") return BFS;
|
||||
else if (s == "KRUSKAL") return KRUSKAL;
|
||||
throw invalid_argument("SubgraphBuilderParameters::skeletonTranslator undefined string " + s);
|
||||
return KRUSKAL;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton w) {
|
||||
if ( w == NATURALCHAIN )return "NATURALCHAIN";
|
||||
else if ( w == BFS ) return "BFS";
|
||||
else if ( w == KRUSKAL )return "KRUSKAL";
|
||||
else return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilderParameters::SkeletonWeight SubgraphBuilderParameters::skeletonWeightTranslator(const std::string &src) {
|
||||
std::string s = src; boost::algorithm::to_upper(s);
|
||||
if (s == "EQUAL") return EQUAL;
|
||||
else if (s == "RHS") return RHS_2NORM;
|
||||
else if (s == "LHS") return LHS_FNORM;
|
||||
else if (s == "RANDOM") return RANDOM;
|
||||
throw invalid_argument("SubgraphBuilderParameters::skeletonWeightTranslator undefined string " + s);
|
||||
return EQUAL;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::skeletonWeightTranslator(SkeletonWeight w) {
|
||||
if ( w == EQUAL ) return "EQUAL";
|
||||
else if ( w == RHS_2NORM ) return "RHS";
|
||||
else if ( w == LHS_FNORM ) return "LHS";
|
||||
else if ( w == RANDOM ) return "RANDOM";
|
||||
else return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilderParameters::AugmentationWeight SubgraphBuilderParameters::augmentationWeightTranslator(const std::string &src) {
|
||||
std::string s = src; boost::algorithm::to_upper(s);
|
||||
if (s == "SKELETON") return SKELETON;
|
||||
// else if (s == "STRETCH") return STRETCH;
|
||||
// else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH;
|
||||
throw invalid_argument("SubgraphBuilder::Parameters::augmentationWeightTranslator undefined string " + s);
|
||||
return SKELETON;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::string SubgraphBuilderParameters::augmentationWeightTranslator(AugmentationWeight w) {
|
||||
if ( w == SKELETON ) return "SKELETON";
|
||||
// else if ( w == STRETCH ) return "STRETCH";
|
||||
// else if ( w == GENERALIZED_STRETCH ) return "GENERALIZED_STRETCH";
|
||||
else return "UNKNOWN";
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const {
|
||||
const SubgraphBuilderParameters &p = parameters_;
|
||||
switch (p.skeleton_) {
|
||||
case SubgraphBuilderParameters::NATURALCHAIN:
|
||||
return natural_chain(gfg);
|
||||
break;
|
||||
case SubgraphBuilderParameters::BFS:
|
||||
return bfs(gfg);
|
||||
break;
|
||||
case SubgraphBuilderParameters::KRUSKAL:
|
||||
return kruskal(gfg, ordering, w);
|
||||
break;
|
||||
default:
|
||||
cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl;
|
||||
break;
|
||||
}
|
||||
return vector<size_t>();
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const {
|
||||
std::vector<size_t> result ;
|
||||
size_t idx = 0;
|
||||
for ( const GaussianFactor::shared_ptr &gf: gfg ) {
|
||||
if ( gf->size() == 1 ) {
|
||||
result.push_back(idx);
|
||||
}
|
||||
idx++;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::natural_chain(const GaussianFactorGraph &gfg) const {
|
||||
std::vector<size_t> result ;
|
||||
size_t idx = 0;
|
||||
for ( const GaussianFactor::shared_ptr &gf: gfg ) {
|
||||
if ( gf->size() == 2 ) {
|
||||
const Key k0 = gf->keys()[0], k1 = gf->keys()[1];
|
||||
if ( (k1-k0) == 1 || (k0-k1) == 1 )
|
||||
result.push_back(idx);
|
||||
}
|
||||
idx++;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
|
||||
const VariableIndex variableIndex(gfg);
|
||||
/* start from the first key of the first factor */
|
||||
Key seed = gfg[0]->keys()[0];
|
||||
|
||||
const size_t n = variableIndex.size();
|
||||
|
||||
/* each vertex has self as the predecessor */
|
||||
std::vector<size_t> result;
|
||||
result.reserve(n-1);
|
||||
|
||||
/* Initialize */
|
||||
std::queue<size_t> q;
|
||||
q.push(seed);
|
||||
|
||||
std::set<size_t> flags;
|
||||
flags.insert(seed);
|
||||
|
||||
/* traversal */
|
||||
while ( !q.empty() ) {
|
||||
const size_t head = q.front(); q.pop();
|
||||
for ( const size_t id: variableIndex[head] ) {
|
||||
const GaussianFactor &gf = *gfg[id];
|
||||
for ( const size_t key: gf.keys() ) {
|
||||
if ( flags.count(key) == 0 ) {
|
||||
q.push(key);
|
||||
flags.insert(key);
|
||||
result.push_back(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const {
|
||||
const VariableIndex variableIndex(gfg);
|
||||
const size_t n = variableIndex.size();
|
||||
const vector<size_t> idx = sort_idx(w) ;
|
||||
|
||||
/* initialize buffer */
|
||||
vector<size_t> result;
|
||||
result.reserve(n-1);
|
||||
|
||||
// Disjoint-set forest over the n variables (Kruskal's algorithm)
DSFVector D(n);
|
||||
|
||||
size_t count = 0 ; double sum = 0.0 ;
|
||||
for (const size_t id: idx) {
|
||||
const GaussianFactor &gf = *gfg[id];
|
||||
if ( gf.keys().size() != 2 ) continue;
|
||||
const size_t u = ordering.find(gf.keys()[0])->second,
|
||||
u_root = D.find(u),
|
||||
v = ordering.find(gf.keys()[1])->second,
|
||||
v_root = D.find(v) ;
|
||||
if ( u_root != v_root ) {
|
||||
D.merge(u_root, v_root) ;
|
||||
result.push_back(id) ;
|
||||
sum += w[id] ;
|
||||
if ( ++count == n-1 ) break ;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
std::vector<size_t> SubgraphBuilder::sample(const std::vector<double> &weights, const size_t t) const {
|
||||
return uniqueSampler(weights, t);
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
Subgraph::shared_ptr SubgraphBuilder::operator() (const GaussianFactorGraph &gfg) const {
|
||||
|
||||
const SubgraphBuilderParameters &p = parameters_;
|
||||
const Ordering inverse_ordering = Ordering::Natural(gfg);
|
||||
const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert();
|
||||
const size_t n = inverse_ordering.size(), t = n * p.complexity_ ;
|
||||
|
||||
vector<double> w = weights(gfg);
|
||||
const vector<size_t> tree = buildTree(gfg, forward_ordering, w);
|
||||
|
||||
/* sanity check */
|
||||
if ( tree.size() != n-1 ) {
|
||||
throw runtime_error("SubgraphBuilder::operator() tree.size() != n-1 failed ");
|
||||
}
|
||||
|
||||
/* down weight the tree edges to zero */
|
||||
for ( const size_t id: tree ) {
|
||||
w[id] = 0.0;
|
||||
}
|
||||
|
||||
/* decide how many edges to augment */
|
||||
std::vector<size_t> offTree = sample(w, t);
|
||||
|
||||
vector<size_t> subgraph = unary(gfg);
|
||||
subgraph.insert(subgraph.end(), tree.begin(), tree.end());
|
||||
subgraph.insert(subgraph.end(), offTree.begin(), offTree.end());
|
||||
|
||||
return boost::make_shared<Subgraph>(subgraph);
|
||||
}
|
||||
|
||||
/****************************************************************/
|
||||
SubgraphBuilder::Weights SubgraphBuilder::weights(const GaussianFactorGraph &gfg) const
|
||||
{
|
||||
const size_t m = gfg.size() ;
|
||||
Weights weight; weight.reserve(m);
|
||||
|
||||
for(const GaussianFactor::shared_ptr &gf: gfg ) {
|
||||
switch ( parameters_.skeletonWeight_ ) {
|
||||
case SubgraphBuilderParameters::EQUAL:
|
||||
weight.push_back(1.0);
|
||||
break;
|
||||
case SubgraphBuilderParameters::RHS_2NORM:
|
||||
{
|
||||
if ( JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf) ) {
|
||||
weight.push_back(jf->getb().norm());
|
||||
}
|
||||
else if ( HessianFactor::shared_ptr hf = boost::dynamic_pointer_cast<HessianFactor>(gf) ) {
|
||||
weight.push_back(hf->linearTerm().norm());
|
||||
}
|
||||
}
|
||||
break;
|
||||
case SubgraphBuilderParameters::LHS_FNORM:
|
||||
{
|
||||
if ( JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf) ) {
|
||||
weight.push_back(std::sqrt(jf->getA().squaredNorm()));
|
||||
}
|
||||
else if ( HessianFactor::shared_ptr hf = boost::dynamic_pointer_cast<HessianFactor>(gf) ) {
|
||||
weight.push_back(std::sqrt(hf->information().squaredNorm()));
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case SubgraphBuilderParameters::RANDOM:
|
||||
weight.push_back(std::rand()%100 + 1.0);
|
||||
break;
|
||||
|
||||
default:
|
||||
throw invalid_argument("SubgraphBuilder::weights: undefined weight scheme ");
|
||||
break;
|
||||
}
|
||||
}
|
||||
return weight;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
SubgraphPreconditioner::SubgraphPreconditioner(const SubgraphPreconditionerParameters &p) :
|
||||
parameters_(p) {}
|
||||
|
@ -484,21 +118,20 @@ double SubgraphPreconditioner::error(const VectorValues& y) const {
|
|||
|
||||
/* ************************************************************************* */
|
||||
// gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
|
||||
VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const {
|
||||
VectorValues SubgraphPreconditioner::gradient(const VectorValues &y) const {
|
||||
VectorValues x = Rc1()->backSubstitute(y); /* inv(R1)*y */
|
||||
Errors e = (*Ab2()*x - *b2bar()); /* (A2*inv(R1)*y-b2bar) */
|
||||
Errors e = (*Ab2() * x - *b2bar()); /* (A2*inv(R1)*y-b2bar) */
|
||||
VectorValues v = VectorValues::Zero(x);
|
||||
Ab2()->transposeMultiplyAdd(1.0, e, v); /* A2'*(A2*inv(R1)*y-b2bar) */
|
||||
Ab2()->transposeMultiplyAdd(1.0, e, v); /* A2'*(A2*inv(R1)*y-b2bar) */
|
||||
return y + Rc1()->backSubstituteTranspose(v);
|
||||
}
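
Transcribed into LaTeX, with R_1 the triangular factor of the subgraph and A_2, \bar b_2 the remaining factors, the gradient computed above is

\nabla f(y) = y + R_1^{-T} A_2^{\top} \left( A_2 R_1^{-1} y - \bar b_2 \right).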
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
|
||||
Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
|
||||
|
||||
Errors e(y);
|
||||
VectorValues x = Rc1()->backSubstitute(y); /* x=inv(R1)*y */
|
||||
Errors e2 = *Ab2() * x; /* A2*x */
|
||||
Errors e2 = *Ab2() * x; /* A2*x */
|
||||
e.splice(e.end(), e2);
|
||||
return e;
|
||||
}
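
In the same notation, the operator applied here stacks the identity block on top of the off-tree block:

A\,y = \begin{bmatrix} I \\ A_2 R_1^{-1} \end{bmatrix} y
     = \begin{bmatrix} y \\ A_2 R_1^{-1} y \end{bmatrix}.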
|
||||
|
@ -508,8 +141,10 @@ Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
|
|||
void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const {
|
||||
|
||||
Errors::iterator ei = e.begin();
|
||||
for (size_t i = 0; i < y.size(); ++i, ++ei)
|
||||
*ei = y[i];
|
||||
for(const auto& key_value: y) {
|
||||
*ei = key_value.second;
|
||||
++ei;
|
||||
}
|
||||
|
||||
// Add A2 contribution
|
||||
VectorValues x = Rc1()->backSubstitute(y); // x=inv(R1)*y
|
||||
|
@ -522,8 +157,10 @@ VectorValues SubgraphPreconditioner::operator^(const Errors& e) const {
|
|||
|
||||
Errors::const_iterator it = e.begin();
|
||||
VectorValues y = zero();
|
||||
for (size_t i = 0; i < y.size(); ++i, ++it)
|
||||
y[i] = *it;
|
||||
for(auto& key_value: y) {
|
||||
key_value.second = *it;
|
||||
++it;
|
||||
}
|
||||
transposeMultiplyAdd2(1.0, it, e.end(), y);
|
||||
return y;
|
||||
}
|
||||
|
@ -534,9 +171,10 @@ void SubgraphPreconditioner::transposeMultiplyAdd
|
|||
(double alpha, const Errors& e, VectorValues& y) const {
|
||||
|
||||
Errors::const_iterator it = e.begin();
|
||||
for (size_t i = 0; i < y.size(); ++i, ++it) {
|
||||
for(auto& key_value: y) {
|
||||
const Vector& ei = *it;
|
||||
axpy(alpha, ei, y[i]);
|
||||
axpy(alpha, ei, key_value.second);
|
||||
++it;
|
||||
}
|
||||
transposeMultiplyAdd2(alpha, it, e.end(), y);
|
||||
}
|
||||
|
@ -563,47 +201,51 @@ void SubgraphPreconditioner::print(const std::string& s) const {
|
|||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
void SubgraphPreconditioner::solve(const Vector& y, Vector &x) const
|
||||
{
|
||||
/* copy first */
|
||||
std::copy(y.data(), y.data() + y.rows(), x.data());
|
||||
void SubgraphPreconditioner::solve(const Vector &y, Vector &x) const {
|
||||
assert(x.size() == y.size());
|
||||
|
||||
/* in place back substitute */
|
||||
for (auto cg: boost::adaptors::reverse(*Rc1_)) {
|
||||
/* back substitute */
|
||||
for (const auto &cg : boost::adaptors::reverse(*Rc1_)) {
|
||||
/* collect a subvector of x that consists of the parents of cg (S) */
|
||||
const Vector xParent = getSubvector(x, keyInfo_, KeyVector(cg->beginParents(), cg->endParents()));
|
||||
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()));
|
||||
const KeyVector parentKeys(cg->beginParents(), cg->endParents());
|
||||
const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals());
|
||||
const Vector xParent = getSubvector(x, keyInfo_, parentKeys);
|
||||
const Vector rhsFrontal = getSubvector(y, keyInfo_, frontalKeys);
|
||||
|
||||
/* compute the solution for the current pivot */
|
||||
const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().solve(rhsFrontal - cg->get_S() * xParent);
|
||||
const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().solve(
|
||||
rhsFrontal - cg->get_S() * xParent);
|
||||
|
||||
/* assign subvector of sol to the frontal variables */
|
||||
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x);
|
||||
setSubvector(solFrontal, keyInfo_, frontalKeys, x);
|
||||
}
|
||||
}
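
Each pivot above is a dense upper-triangular solve against the conditional's frontal block R, with the parent contribution S * xParent moved to the right-hand side first. A hedged Eigen-only sketch of that single step (names and dimensions are assumed, not taken from the GTSAM API):

#include <Eigen/Dense>

// One pivot of the back-substitution: solve R * xFrontal = rhsFrontal - S * xParent,
// where R is upper triangular.
Eigen::VectorXd solveFrontal(const Eigen::MatrixXd& R, const Eigen::MatrixXd& S,
                             const Eigen::VectorXd& rhsFrontal,
                             const Eigen::VectorXd& xParent) {
  return R.triangularView<Eigen::Upper>().solve(rhsFrontal - S * xParent);
}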
|
||||
|
||||
/*****************************************************************************/
|
||||
void SubgraphPreconditioner::transposeSolve(const Vector& y, Vector& x) const
|
||||
{
|
||||
void SubgraphPreconditioner::transposeSolve(const Vector &y, Vector &x) const {
|
||||
/* copy first */
|
||||
assert(x.size() == y.size());
|
||||
std::copy(y.data(), y.data() + y.rows(), x.data());
|
||||
|
||||
/* in place back substitute */
|
||||
for(const boost::shared_ptr<GaussianConditional> & cg: *Rc1_) {
|
||||
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()));
|
||||
// const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().transpose().solve(rhsFrontal);
|
||||
const Vector solFrontal = cg->get_R().transpose().triangularView<Eigen::Lower>().solve(rhsFrontal);
|
||||
for (const auto &cg : *Rc1_) {
|
||||
const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals());
|
||||
const Vector rhsFrontal = getSubvector(x, keyInfo_, frontalKeys);
|
||||
const Vector solFrontal =
|
||||
cg->get_R().transpose().triangularView<Eigen::Lower>().solve(
|
||||
rhsFrontal);
|
||||
|
||||
// Check for an indeterminate solution
|
||||
if ( solFrontal.hasNaN()) throw IndeterminantLinearSystemException(cg->keys().front());
|
||||
if (solFrontal.hasNaN())
|
||||
throw IndeterminantLinearSystemException(cg->keys().front());
|
||||
|
||||
/* assign subvector of sol to the frontal variables */
|
||||
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x);
|
||||
setSubvector(solFrontal, keyInfo_, frontalKeys, x);
|
||||
|
||||
/* subtract from parent variables */
|
||||
for (GaussianConditional::const_iterator it = cg->beginParents(); it != cg->endParents(); it++) {
|
||||
KeyInfo::const_iterator it2 = keyInfo_.find(*it);
|
||||
Eigen::Map<Vector> rhsParent(x.data()+it2->second.colstart(), it2->second.dim(), 1);
|
||||
for (auto it = cg->beginParents(); it != cg->endParents(); it++) {
|
||||
const KeyInfoEntry &entry = keyInfo_.find(*it)->second;
|
||||
Eigen::Map<Vector> rhsParent(x.data() + entry.start, entry.dim, 1);
|
||||
rhsParent -= Matrix(cg->getA(it)).transpose() * solFrontal;
|
||||
}
|
||||
}
|
||||
|
@ -613,67 +255,18 @@ void SubgraphPreconditioner::transposeSolve(const Vector& y, Vector& x) const
|
|||
void SubgraphPreconditioner::build(const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map<Key,Vector> &lambda)
|
||||
{
|
||||
/* identify the subgraph structure */
|
||||
const SubgraphBuilder builder(parameters_.builderParams_);
|
||||
Subgraph::shared_ptr subgraph = builder(gfg);
|
||||
const SubgraphBuilder builder(parameters_.builderParams);
|
||||
auto subgraph = builder(gfg);
|
||||
|
||||
keyInfo_ = keyInfo;
|
||||
|
||||
/* build factor subgraph */
|
||||
GaussianFactorGraph::shared_ptr gfg_subgraph = buildFactorSubgraph(gfg, *subgraph, true);
|
||||
GaussianFactorGraph::shared_ptr gfg_subgraph = buildFactorSubgraph(gfg, subgraph, true);
|
||||
|
||||
/* factorize and cache BayesNet */
|
||||
Rc1_ = gfg_subgraph->eliminateSequential();
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys) {
|
||||
|
||||
/* a cache of starting index and dim */
|
||||
typedef vector<pair<size_t, size_t> > Cache;
|
||||
Cache cache;
|
||||
|
||||
/* figure out dimension by traversing the keys */
|
||||
size_t d = 0;
|
||||
for ( const Key &key: keys ) {
|
||||
const KeyInfoEntry &entry = keyInfo.find(key)->second;
|
||||
cache.push_back(make_pair(entry.colstart(), entry.dim()));
|
||||
d += entry.dim();
|
||||
}
|
||||
|
||||
/* use the cache to fill the result */
|
||||
Vector result = Vector::Zero(d, 1);
|
||||
size_t idx = 0;
|
||||
for ( const Cache::value_type &p: cache ) {
|
||||
result.segment(idx, p.second) = src.segment(p.first, p.second) ;
|
||||
idx += p.second;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst) {
|
||||
/* use the cache */
|
||||
size_t idx = 0;
|
||||
for ( const Key &key: keys ) {
|
||||
const KeyInfoEntry &entry = keyInfo.find(key)->second;
|
||||
dst.segment(entry.colstart(), entry.dim()) = src.segment(idx, entry.dim()) ;
|
||||
idx += entry.dim();
|
||||
}
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
boost::shared_ptr<GaussianFactorGraph>
|
||||
buildFactorSubgraph(const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone) {
|
||||
|
||||
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph());
|
||||
result->reserve(subgraph.size());
|
||||
for ( const SubgraphEdge &e: subgraph ) {
|
||||
const size_t idx = e.index();
|
||||
if ( clone ) result->push_back(gfg[idx]->clone());
|
||||
else result->push_back(gfg[idx]);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace gtsam
|
||||
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
@ -17,30 +17,16 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/SubgraphBuilder.h>
|
||||
#include <gtsam/linear/Errors.h>
|
||||
#include <gtsam/linear/GaussianBayesNet.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/IterativeSolver.h>
|
||||
#include <gtsam/linear/Preconditioner.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/base/FastMap.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
class access;
|
||||
} /* namespace serialization */
|
||||
} /* namespace boost */
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -49,142 +35,11 @@ namespace gtsam {
|
|||
class GaussianFactorGraph;
|
||||
class VectorValues;
|
||||
|
||||
struct GTSAM_EXPORT SubgraphEdge {
|
||||
size_t index_; /* edge id */
|
||||
double weight_; /* edge weight */
|
||||
SubgraphEdge() : index_(0), weight_(1.0) {}
|
||||
SubgraphEdge(const SubgraphEdge &e) : index_(e.index()), weight_(e.weight()) {}
|
||||
SubgraphEdge(const size_t index, const double weight = 1.0): index_(index), weight_(weight) {}
|
||||
inline size_t index() const { return index_; }
|
||||
inline double weight() const { return weight_; }
|
||||
inline bool isUnitWeight() const { return (weight_ == 1.0); }
|
||||
friend std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge);
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template<class Archive>
|
||||
void serialize(Archive & ar, const unsigned int /*version*/) {
|
||||
ar & BOOST_SERIALIZATION_NVP(index_);
|
||||
ar & BOOST_SERIALIZATION_NVP(weight_);
|
||||
}
|
||||
};
|
||||
|
||||
/**************************************************************************/
|
||||
class GTSAM_EXPORT Subgraph {
|
||||
public:
|
||||
typedef boost::shared_ptr<Subgraph> shared_ptr;
|
||||
typedef std::vector<shared_ptr> vector_shared_ptr;
|
||||
typedef std::vector<SubgraphEdge> Edges;
|
||||
typedef std::vector<size_t> EdgeIndices;
|
||||
typedef Edges::iterator iterator;
|
||||
typedef Edges::const_iterator const_iterator;
|
||||
|
||||
protected:
|
||||
Edges edges_; /* index to the factors */
|
||||
|
||||
public:
|
||||
Subgraph() {}
|
||||
Subgraph(const Subgraph &subgraph) : edges_(subgraph.edges()) {}
|
||||
Subgraph(const Edges &edges) : edges_(edges) {}
|
||||
Subgraph(const std::vector<size_t> &indices) ;
|
||||
|
||||
inline const Edges& edges() const { return edges_; }
|
||||
inline size_t size() const { return edges_.size(); }
|
||||
EdgeIndices edgeIndices() const;
|
||||
|
||||
iterator begin() { return edges_.begin(); }
|
||||
const_iterator begin() const { return edges_.begin(); }
|
||||
iterator end() { return edges_.end(); }
|
||||
const_iterator end() const { return edges_.end(); }
|
||||
|
||||
void save(const std::string &fn) const;
|
||||
static shared_ptr load(const std::string &fn);
|
||||
friend std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph);
|
||||
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template<class Archive>
|
||||
void serialize(Archive & ar, const unsigned int /*version*/) {
|
||||
ar & BOOST_SERIALIZATION_NVP(edges_);
|
||||
}
|
||||
};
|
||||
|
||||
/****************************************************************************/
|
||||
struct GTSAM_EXPORT SubgraphBuilderParameters {
|
||||
public:
|
||||
typedef boost::shared_ptr<SubgraphBuilderParameters> shared_ptr;
|
||||
|
||||
enum Skeleton {
|
||||
/* augmented tree */
|
||||
NATURALCHAIN = 0, /* natural ordering of the graph */
|
||||
BFS, /* breadth-first search tree */
|
||||
KRUSKAL, /* maximum weighted spanning tree */
|
||||
} skeleton_ ;
|
||||
|
||||
enum SkeletonWeight { /* how to weigh the graph edges */
|
||||
EQUAL = 0, /* every block edge has equal weight */
|
||||
RHS_2NORM, /* use the 2-norm of the rhs */
|
||||
LHS_FNORM, /* use the frobenius norm of the lhs */
|
||||
RANDOM, /* bounded random edge weight */
|
||||
} skeletonWeight_ ;
|
||||
|
||||
enum AugmentationWeight { /* how to weigh the graph edges */
|
||||
SKELETON = 0, /* use the same weights in building the skeleton */
|
||||
// STRETCH, /* stretch in the laplacian sense */
|
||||
// GENERALIZED_STRETCH /* the generalized stretch defined in jian2013iros */
|
||||
} augmentationWeight_ ;
|
||||
|
||||
double complexity_;
|
||||
|
||||
SubgraphBuilderParameters()
|
||||
: skeleton_(KRUSKAL), skeletonWeight_(RANDOM), augmentationWeight_(SKELETON), complexity_(1.0) {}
|
||||
virtual ~SubgraphBuilderParameters() {}
|
||||
|
||||
/* for serialization */
|
||||
void print() const ;
|
||||
virtual void print(std::ostream &os) const ;
|
||||
friend std::ostream& operator<<(std::ostream &os, const PreconditionerParameters &p);
|
||||
|
||||
static Skeleton skeletonTranslator(const std::string &s);
|
||||
static std::string skeletonTranslator(Skeleton w);
|
||||
static SkeletonWeight skeletonWeightTranslator(const std::string &s);
|
||||
static std::string skeletonWeightTranslator(SkeletonWeight w);
|
||||
static AugmentationWeight augmentationWeightTranslator(const std::string &s);
|
||||
static std::string augmentationWeightTranslator(AugmentationWeight w);
|
||||
};
|
||||
|
||||
/*****************************************************************************/
|
||||
class GTSAM_EXPORT SubgraphBuilder {
|
||||
|
||||
public:
|
||||
typedef SubgraphBuilder Base;
|
||||
typedef boost::shared_ptr<SubgraphBuilder> shared_ptr;
|
||||
typedef std::vector<double> Weights;
|
||||
|
||||
SubgraphBuilder(const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
|
||||
: parameters_(p) {}
|
||||
virtual ~SubgraphBuilder() {}
|
||||
virtual boost::shared_ptr<Subgraph> operator() (const GaussianFactorGraph &jfg) const ;
|
||||
|
||||
private:
|
||||
std::vector<size_t> buildTree(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &weights) const ;
|
||||
std::vector<size_t> unary(const GaussianFactorGraph &gfg) const ;
|
||||
std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const ;
|
||||
std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const ;
|
||||
std::vector<size_t> kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const ;
|
||||
std::vector<size_t> sample(const std::vector<double> &weights, const size_t t) const ;
|
||||
Weights weights(const GaussianFactorGraph &gfg) const;
|
||||
SubgraphBuilderParameters parameters_;
|
||||
|
||||
};
|
||||
|
||||
/*******************************************************************************************/
|
||||
struct GTSAM_EXPORT SubgraphPreconditionerParameters : public PreconditionerParameters {
|
||||
typedef PreconditionerParameters Base;
|
||||
typedef boost::shared_ptr<SubgraphPreconditionerParameters> shared_ptr;
|
||||
SubgraphPreconditionerParameters(const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
|
||||
: Base(), builderParams_(p) {}
|
||||
virtual ~SubgraphPreconditionerParameters() {}
|
||||
SubgraphBuilderParameters builderParams_;
|
||||
: builderParams(p) {}
|
||||
SubgraphBuilderParameters builderParams;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -249,8 +104,8 @@ namespace gtsam {
|
|||
|
||||
/* A zero VectorValues with the structure of xbar */
|
||||
VectorValues zero() const {
|
||||
VectorValues V(VectorValues::Zero(*xbar_));
|
||||
return V ;
|
||||
assert(xbar_);
|
||||
return VectorValues::Zero(*xbar_);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -285,50 +140,19 @@ namespace gtsam {
|
|||
/*****************************************************************************/
|
||||
/* implement virtual functions of Preconditioner */
|
||||
|
||||
/* Computation Interfaces for Vector */
|
||||
virtual void solve(const Vector& y, Vector &x) const;
|
||||
virtual void transposeSolve(const Vector& y, Vector& x) const ;
|
||||
/// implement x = R^{-1} y
|
||||
void solve(const Vector& y, Vector &x) const override;
|
||||
|
||||
virtual void build(
|
||||
/// implement x = R^{-T} y
|
||||
void transposeSolve(const Vector& y, Vector& x) const override;
|
||||
|
||||
/// build/factorize the preconditioner
|
||||
void build(
|
||||
const GaussianFactorGraph &gfg,
|
||||
const KeyInfo &info,
|
||||
const std::map<Key,Vector> &lambda
|
||||
) ;
|
||||
) override;
|
||||
/*****************************************************************************/
|
||||
};

/* get subvectors */
Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys);

/* set subvectors */
void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst);

/* build a factor subgraph, which is defined as a set of weighted edges (factors) */
boost::shared_ptr<GaussianFactorGraph>
buildFactorSubgraph(const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone);

/* sort the container and return the permutation index (ascending by value) */
template <typename Container>
std::vector<size_t> sort_idx(const Container &src) {
  typedef typename Container::value_type T;
  const size_t n = src.size();
  std::vector<std::pair<size_t, T> > tmp;
  tmp.reserve(n);
  for (size_t i = 0; i < n; i++)
    tmp.push_back(std::make_pair(i, src[i]));

  /* sort by stored value; comparing the whole (index, value) pair with the
     default comparator would leave the sequence in index order and simply
     return the identity permutation */
  std::stable_sort(tmp.begin(), tmp.end(),
                   [](const std::pair<size_t, T> &a, const std::pair<size_t, T> &b) {
                     return a.second < b.second;
                   });

  /* copy back */
  std::vector<size_t> idx;
  idx.reserve(n);
  for (size_t i = 0; i < n; i++) {
    idx.push_back(tmp[i].first);
  }
  return idx;
}
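A quick aside on the helper above (not part of the diff): a hypothetical driver sorting three weights. The expected output assumes the value-based comparison shown, and the include path for sort_idx is a guess since this hunk does not name the header.

#include <iostream>
#include <vector>
#include <gtsam/linear/SubgraphPreconditioner.h>  // assumed location of sort_idx in this changeset

int main() {
  std::vector<double> weights = {3.0, 1.0, 2.0};
  const std::vector<size_t> idx = gtsam::sort_idx(weights);
  for (size_t i : idx) std::cout << i << ' ';  // expected: 1 2 0 (ascending weight)
  std::cout << '\n';
  return 0;
}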

} // namespace gtsam


@@ -18,153 +18,94 @@
 */

#include <gtsam/linear/SubgraphSolver.h>

#include <gtsam/linear/SubgraphBuilder.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/iterative-inl.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/base/DSFVector.h>

using namespace std;

namespace gtsam {

/**************************************************************************************************/
// Just taking system [A|b]
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab,
                               const Parameters &parameters,
                               const Ordering &ordering)
    : parameters_(parameters) {
  GaussianFactorGraph::shared_ptr Ab1, Ab2;
  std::tie(Ab1, Ab2) = splitGraph(Ab);
  if (parameters_.verbosity())
    cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size()
         << " factors" << endl;

  auto Rc1 = Ab1->eliminateSequential(ordering, EliminateQR);
  auto xbar = boost::make_shared<VectorValues>(Rc1->optimize());
  pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}
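As an aside (not part of the diff): a minimal end-to-end sketch of the constructor above followed by a solve. The toy system, the COLAMD ordering, and the default-constructed parameters are illustrative assumptions, not something prescribed by this changeset.

#include <gtsam/inference/Ordering.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/SubgraphSolver.h>

int main() {
  using namespace gtsam;

  // Toy chain: prior on x0, then x1 - x0 = 1 and x2 - x1 = 1 (illustrative only).
  GaussianFactorGraph Ab;
  const auto unit = noiseModel::Unit::Create(1);
  const Matrix I = Matrix::Identity(1, 1);
  Ab.add(0, I, (Vector(1) << 0.0).finished(), unit);
  Ab.add(0, -I, 1, I, (Vector(1) << 1.0).finished(), unit);
  Ab.add(1, -I, 2, I, (Vector(1) << 1.0).finished(), unit);

  // The constructor splits Ab, eliminates the tree part, and builds the
  // subgraph preconditioner; optimize() then runs preconditioned CG.
  SubgraphSolver::Parameters parameters;
  SubgraphSolver solver(Ab, parameters, Ordering::Colamd(Ab));
  const VectorValues solution = solver.optimize();
  solution.print("solution");
  return 0;
}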

/**************************************************************************************************/
// Taking eliminated tree [R1|c] and constraint graph [A2|b2]
SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
                               const GaussianFactorGraph::shared_ptr &Ab2,
                               const Parameters &parameters)
    : parameters_(parameters) {
  auto xbar = boost::make_shared<VectorValues>(Rc1->optimize());
  pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}

/**************************************************************************************************/
// Taking subgraphs [A1|b1] and [A2|b2]
// delegate up
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1,
                               const GaussianFactorGraph::shared_ptr &Ab2,
                               const Parameters &parameters,
                               const Ordering &ordering)
    : SubgraphSolver(Ab1.eliminateSequential(ordering, EliminateQR), Ab2,
                     parameters) {}

/**************************************************************************************************/
// deprecated variants
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
                               const GaussianFactorGraph &Ab2,
                               const Parameters &parameters)
    : SubgraphSolver(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2),
                     parameters) {}

SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1,
                               const GaussianFactorGraph &Ab2,
                               const Parameters &parameters,
                               const Ordering &ordering)
    : SubgraphSolver(Ab1, boost::make_shared<GaussianFactorGraph>(Ab2),
                     parameters, ordering) {}
#endif

/**************************************************************************************************/
VectorValues SubgraphSolver::optimize() const {
  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues,
                                         Errors>(*pc_, pc_->zero(), parameters_);
  return pc_->x(ybar);
}

/**************************************************************************************************/
VectorValues SubgraphSolver::optimize(const VectorValues &initial) {
  // the initial estimate is ignored in this case
  return optimize();
}

/**************************************************************************************************/
void SubgraphSolver::initialize(const GaussianFactorGraph &jfg) {
  GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>(),
                                  Ab2 = boost::make_shared<GaussianFactorGraph>();

  boost::tie(Ab1, Ab2) = splitGraph(jfg);
  if (parameters_.verbosity())
    cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size()
         << " factors" << endl;

  GaussianBayesNet::shared_ptr Rc1 = Ab1->eliminateSequential(ordering_, EliminateQR);
  VectorValues::shared_ptr xbar = boost::make_shared<VectorValues>(Rc1->optimize());
  pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}

/**************************************************************************************************/
void SubgraphSolver::initialize(const GaussianBayesNet::shared_ptr &Rc1,
                                const GaussianFactorGraph::shared_ptr &Ab2) {
  VectorValues::shared_ptr xbar = boost::make_shared<VectorValues>(Rc1->optimize());
  pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}

/**************************************************************************************************/
boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>  //
SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
  const VariableIndex index(jfg);
  const size_t n = index.size();
  DSFVector D(n);

  GaussianFactorGraph::shared_ptr At(new GaussianFactorGraph());
  GaussianFactorGraph::shared_ptr Ac(new GaussianFactorGraph());

  size_t t = 0;
  for (const GaussianFactor::shared_ptr &gf : jfg) {
    if (gf->keys().size() > 2) {
      throw runtime_error(
          "SubgraphSolver::splitGraph the graph is not simple, sanity check failed");
    }

    bool augment = false;

    /* check whether this factor should be augmented to the "tree" graph */
    if (gf->keys().size() == 1)
      augment = true;
    else {
      const Key u = gf->keys()[0], v = gf->keys()[1], u_root = D.find(u),
                v_root = D.find(v);
      if (u_root != v_root) {
        t++;
        augment = true;
        D.merge(u_root, v_root);
      }
    }
    if (augment)
      At->push_back(gf);
    else
      Ac->push_back(gf);
  }

  return boost::tie(At, Ac);
}
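To make the union-find rule above concrete, here is a standalone, GTSAM-free sketch (not part of the diff) of the same split applied to a hypothetical edge list: an edge that joins two different components becomes a tree edge, everything else becomes a constraint edge.

#include <cstddef>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

static size_t findRoot(std::vector<size_t> &parent, size_t i) {
  while (parent[i] != i) i = parent[i] = parent[parent[i]];  // path halving
  return i;
}

int main() {
  const size_t n = 4;  // variables 0..3 (made-up example)
  std::vector<std::pair<size_t, size_t>> edges = {{0, 1}, {1, 2}, {0, 2}, {2, 3}};
  std::vector<size_t> parent(n);
  std::iota(parent.begin(), parent.end(), 0);

  std::vector<size_t> tree, constraints;
  for (size_t e = 0; e < edges.size(); ++e) {
    const size_t ru = findRoot(parent, edges[e].first);
    const size_t rv = findRoot(parent, edges[e].second);
    if (ru != rv) {            // joins two components -> spanning-tree edge
      parent[ru] = rv;
      tree.push_back(e);
    } else {                   // closes a loop -> constraint edge
      constraints.push_back(e);
    }
  }
  std::cout << "tree edges: " << tree.size()                             // 3
            << ", constraint edges: " << constraints.size() << "\n";     // 1
  return 0;
}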

/****************************************************************************/
VectorValues SubgraphSolver::optimize(const GaussianFactorGraph &gfg,
                                      const KeyInfo &keyInfo,
                                      const map<Key, Vector> &lambda,
                                      const VectorValues &initial) {
  return VectorValues();  // not implemented: returns an empty VectorValues
}

/**************************************************************************************************/
pair<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>  //
SubgraphSolver::splitGraph(const GaussianFactorGraph &factorGraph) {
  /* identify the subgraph structure */
  const SubgraphBuilder builder(parameters_.builderParams);
  auto subgraph = builder(factorGraph);

  /* build factor subgraph */
  return splitFactorGraph(factorGraph, subgraph);
}

/****************************************************************************/

} // namespace gtsam