Merge remote-tracking branch 'origin/develop'
commit
52e8db60dd
|
|
@ -1,4 +1,5 @@
|
|||
/build*
|
||||
/debug*
|
||||
.idea
|
||||
*.pyc
|
||||
*.DS_Store
|
||||
|
|
@ -7,5 +8,16 @@
|
|||
/examples/Data/pose3example-rewritten.txt
|
||||
*.txt.user
|
||||
*.txt.user.6d59f0c
|
||||
/python-build/
|
||||
*.pydevproject
|
||||
cython/venv
|
||||
cython/gtsam.cpp
|
||||
cython/gtsam.cpython-35m-darwin.so
|
||||
cython/gtsam.pyx
|
||||
cython/gtsam.so
|
||||
cython/gtsam_wrapper.pxd
|
||||
.vscode
|
||||
.env
|
||||
/.vs/
|
||||
/CMakeSettings.json
|
||||
# for QtCreator:
|
||||
CMakeLists.txt.user*
|
||||
|
|
|
|||
270
CMakeLists.txt
270
CMakeLists.txt
|
|
@ -1,6 +1,6 @@
|
|||
|
||||
project(GTSAM CXX C)
|
||||
cmake_minimum_required(VERSION 2.6)
|
||||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
# new feature to Cmake Version > 2.8.12
|
||||
# Mac ONLY. Define Relative Path on Mac OS
|
||||
|
|
@ -55,46 +55,56 @@ endif()
|
|||
if(GTSAM_UNSTABLE_AVAILABLE)
|
||||
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
|
||||
endif()
|
||||
option(GTSAM_BUILD_STATIC_LIBRARY "Build a static gtsam library, instead of shared" OFF)
|
||||
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
|
||||
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
|
||||
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF)
|
||||
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
|
||||
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
|
||||
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
|
||||
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
|
||||
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module" OFF)
|
||||
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
|
||||
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typdef Point2 and Point3 to Eigen::Vector equivalents" OFF)
|
||||
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
|
||||
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
|
||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
|
||||
endif()
|
||||
|
||||
# Options relating to MATLAB wrapper
|
||||
# TODO: Check for matlab mex binary before handling building of binaries
|
||||
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
|
||||
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab wrap utility (necessary for matlab interface)" ON)
|
||||
option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF)
|
||||
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON)
|
||||
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)")
|
||||
|
||||
# Check / set dependent variables for MATLAB wrapper
|
||||
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT GTSAM_BUILD_WRAP)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP")
|
||||
if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP")
|
||||
endif()
|
||||
if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES)
|
||||
set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX})
|
||||
if(NOT "${CURRENT_POSTFIX}" STREQUAL "")
|
||||
message(FATAL_ERROR "Cannot use executable postfixes with the matlab or cython wrappers. Please disable GTSAM_BUILD_TYPE_POSTFIXES")
|
||||
endif()
|
||||
endif()
|
||||
if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP")
|
||||
endif()
|
||||
|
||||
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_BUILD_STATIC_LIBRARY)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_BUILD_STATIC_LIBRARY are both enabled. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of GTSAM_BUILD_STATIC_LIBRARY.")
|
||||
endif()
|
||||
|
||||
if(GTSAM_BUILD_PYTHON AND GTSAM_ALLOW_DEPRECATED_SINCE_V4)
|
||||
message(FATAL_ERROR "GTSAM_BUILD_PYTHON and GTSAM_ALLOW_DEPRECATED_SINCE_V4 are both enabled. The python module cannot be compiled with deprecated functions turned on. Turn one of the two options off.")
|
||||
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
|
||||
endif()
|
||||
|
||||
if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.")
|
||||
endif()
|
||||
|
||||
if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
|
||||
message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. Please turn one of the two options off.")
|
||||
endif()
|
||||
|
||||
# Flags for choosing default packaging tools
|
||||
set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
|
||||
set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
|
||||
|
|
@ -106,35 +116,78 @@ set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
|
|||
# BOOST_ROOT: path to install prefix for boost
|
||||
# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT
|
||||
|
||||
# If using Boost shared libs, disable auto linking
|
||||
if(MSVC)
|
||||
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols
|
||||
# Disable autolinking
|
||||
# By default, boost only builds static libraries on windows
|
||||
set(Boost_USE_STATIC_LIBS ON) # only find static libs
|
||||
# If we ever reset above on windows and, ...
|
||||
# If we use Boost shared libs, disable auto linking.
|
||||
# Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
|
||||
if(NOT Boost_USE_STATIC_LIBS)
|
||||
add_definitions(-DBOOST_ALL_NO_LIB)
|
||||
add_definitions(-DBOOST_ALL_DYN_LINK)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
|
||||
endif()
|
||||
# Virtual memory range for PCH exceeded on VS2015
|
||||
if(MSVC_VERSION LESS 1910) # older than VS2017
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time regex timer chrono)
|
||||
# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
|
||||
# or explicit instantiation will generate build errors.
|
||||
# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
|
||||
#
|
||||
if(MSVC AND BUILD_SHARED_LIBS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
|
||||
# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
|
||||
set(BOOST_FIND_MINIMUM_VERSION 1.43)
|
||||
set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
|
||||
|
||||
find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
|
||||
|
||||
# Required components
|
||||
if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
|
||||
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY OR NOT Boost_REGEX_LIBRARY)
|
||||
NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
|
||||
message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
|
||||
endif()
|
||||
|
||||
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
|
||||
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
|
||||
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
|
||||
|
||||
# JLBC: This was once updated to target-based names (Boost::xxx), but it caused
|
||||
# problems with Boost versions newer than FindBoost.cmake was prepared to handle,
|
||||
# so we downgraded this to classic filenames-based variables, and manually adding
|
||||
# the target_include_directories(xxx ${Boost_INCLUDE_DIR})
|
||||
set(GTSAM_BOOST_LIBRARIES
|
||||
${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY}
|
||||
${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_REGEX_LIBRARY})
|
||||
optimized
|
||||
${Boost_SERIALIZATION_LIBRARY_RELEASE}
|
||||
${Boost_SYSTEM_LIBRARY_RELEASE}
|
||||
${Boost_FILESYSTEM_LIBRARY_RELEASE}
|
||||
${Boost_THREAD_LIBRARY_RELEASE}
|
||||
${Boost_DATE_TIME_LIBRARY_RELEASE}
|
||||
${Boost_REGEX_LIBRARY_RELEASE}
|
||||
debug
|
||||
${Boost_SERIALIZATION_LIBRARY_DEBUG}
|
||||
${Boost_SYSTEM_LIBRARY_DEBUG}
|
||||
${Boost_FILESYSTEM_LIBRARY_DEBUG}
|
||||
${Boost_THREAD_LIBRARY_DEBUG}
|
||||
${Boost_DATE_TIME_LIBRARY_DEBUG}
|
||||
${Boost_REGEX_LIBRARY_DEBUG}
|
||||
)
|
||||
message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}")
|
||||
if (GTSAM_DISABLE_NEW_TIMERS)
|
||||
message("WARNING: GTSAM timing instrumentation manually disabled")
|
||||
add_definitions(-DGTSAM_DISABLE_NEW_TIMERS)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
|
||||
else()
|
||||
if(Boost_TIMER_LIBRARY)
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES ${Boost_TIMER_LIBRARY} ${Boost_CHRONO_LIBRARY})
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES
|
||||
optimized
|
||||
${Boost_TIMER_LIBRARY_RELEASE}
|
||||
${Boost_CHRONO_LIBRARY_RELEASE}
|
||||
debug
|
||||
${Boost_TIMER_LIBRARY_DEBUG}
|
||||
${Boost_CHRONO_LIBRARY_DEBUG}
|
||||
)
|
||||
else()
|
||||
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
|
||||
message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
|
||||
|
|
@ -144,29 +197,19 @@ endif()
|
|||
|
||||
if(NOT (${Boost_VERSION} LESS 105600))
|
||||
message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
|
||||
add_definitions(-DBOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES -DBOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES BOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
# Find TBB
|
||||
find_package(TBB)
|
||||
find_package(TBB COMPONENTS tbb tbbmalloc)
|
||||
|
||||
# Set up variables if we're using TBB
|
||||
if(TBB_FOUND AND GTSAM_WITH_TBB)
|
||||
set(GTSAM_USE_TBB 1) # This will go into config.h
|
||||
include_directories(BEFORE ${TBB_INCLUDE_DIRS})
|
||||
set(GTSAM_TBB_LIBRARIES "")
|
||||
if(TBB_DEBUG_LIBRARIES)
|
||||
foreach(lib ${TBB_LIBRARIES})
|
||||
list(APPEND GTSAM_TBB_LIBRARIES optimized "${lib}")
|
||||
endforeach()
|
||||
foreach(lib ${TBB_DEBUG_LIBRARIES})
|
||||
list(APPEND GTSAM_TBB_LIBRARIES debug "${lib}")
|
||||
endforeach()
|
||||
else()
|
||||
set(GTSAM_TBB_LIBRARIES ${TBB_LIBRARIES})
|
||||
endif()
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${GTSAM_TBB_LIBRARIES})
|
||||
# all definitions and link requisites will go via imported targets:
|
||||
# tbb & tbbmalloc
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
|
||||
else()
|
||||
set(GTSAM_USE_TBB 0) # This will go into config.h
|
||||
endif()
|
||||
|
|
@ -182,6 +225,20 @@ endif()
|
|||
# Find Google perftools
|
||||
find_package(GooglePerfTools)
|
||||
|
||||
###############################################################################
|
||||
# Support ccache, if installed
|
||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||
find_program(CCACHE_FOUND ccache)
|
||||
if(CCACHE_FOUND)
|
||||
if(GTSAM_BUILD_WITH_CCACHE)
|
||||
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
|
||||
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
|
||||
else()
|
||||
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
|
||||
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
|
||||
endif()
|
||||
endif(CCACHE_FOUND)
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
# Find MKL
|
||||
|
|
@ -190,7 +247,6 @@ find_package(MKL)
|
|||
if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
|
||||
set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
|
||||
set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
|
||||
include_directories(${MKL_INCLUDE_DIR})
|
||||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
|
||||
|
||||
# --no-as-needed is required with gcc according to the MKL link advisor
|
||||
|
|
@ -225,30 +281,45 @@ option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', us
|
|||
# Switch for using system Eigen or GTSAM-bundled Eigen
|
||||
if(GTSAM_USE_SYSTEM_EIGEN)
|
||||
find_package(Eigen3 REQUIRED)
|
||||
include_directories(AFTER "${EIGEN3_INCLUDE_DIR}")
|
||||
|
||||
# Use generic Eigen include paths e.g. <Eigen/Core>
|
||||
set(GTSAM_EIGEN_INCLUDE_PREFIX "${EIGEN3_INCLUDE_DIR}")
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
|
||||
|
||||
# check if MKL is also enabled - can have one or the other, but not both!
|
||||
# Note: Eigen >= v3.2.5 includes our patches
|
||||
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5))
|
||||
message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL")
|
||||
endif()
|
||||
|
||||
# Check for Eigen version which doesn't work with MKL
|
||||
# See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details.
|
||||
if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
|
||||
message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
|
||||
endif()
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
|
||||
else()
|
||||
# Use bundled Eigen include path.
|
||||
# Clear any variables set by FindEigen3
|
||||
if(EIGEN3_INCLUDE_DIR)
|
||||
set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
|
||||
endif()
|
||||
# Add the bundled version of eigen to the include path so that it can still be included
|
||||
# with #include <Eigen/Core>
|
||||
include_directories(BEFORE "gtsam/3rdparty/Eigen/")
|
||||
|
||||
# set full path to be used by external projects
|
||||
# this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
|
||||
set(GTSAM_EIGEN_INCLUDE_PREFIX "${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/Eigen/")
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
|
||||
|
||||
# The actual include directory (for BUILD cmake target interface):
|
||||
set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
|
||||
endif()
|
||||
|
||||
if (MSVC)
|
||||
if (BUILD_SHARED_LIBS)
|
||||
# mute eigen static assert to avoid errors in shared lib
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC DEIGEN_NO_STATIC_ASSERT)
|
||||
endif()
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
|
|
@ -289,51 +360,29 @@ elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
|
|||
list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
|
||||
endif()
|
||||
|
||||
# Include boost - use 'BEFORE' so that a specific boost specified to CMake
|
||||
# takes precedence over a system-installed one.
|
||||
include_directories(BEFORE SYSTEM ${Boost_INCLUDE_DIR})
|
||||
|
||||
if(GTSAM_SUPPORT_NESTED_DISSECTION)
|
||||
set(METIS_INCLUDE_DIRECTORIES
|
||||
gtsam/3rdparty/metis/include
|
||||
gtsam/3rdparty/metis/libmetis
|
||||
gtsam/3rdparty/metis/GKlib)
|
||||
else()
|
||||
set(METIS_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
|
||||
# Add includes for source directories 'BEFORE' boost and any system include
|
||||
# paths so that the compiler uses GTSAM headers in our source directory instead
|
||||
# of any previously installed GTSAM headers.
|
||||
include_directories(BEFORE
|
||||
gtsam/3rdparty/SuiteSparse_config
|
||||
gtsam/3rdparty/CCOLAMD/Include
|
||||
${METIS_INCLUDE_DIRECTORIES}
|
||||
${PROJECT_SOURCE_DIR}
|
||||
${PROJECT_BINARY_DIR} # So we can include generated config header files
|
||||
CppUnitLite)
|
||||
|
||||
if(MSVC)
|
||||
add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS)
|
||||
add_definitions(/wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
|
||||
endif()
|
||||
|
||||
# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
|
||||
add_definitions(-Wno-unused-local-typedefs)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# As of XCode 7, clang also complains about this
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
|
||||
add_definitions(-Wno-unused-local-typedefs)
|
||||
list(APPEND GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
|
||||
add_definitions(-DGTSAM_EXTRA_CONSISTENCY_CHECKS)
|
||||
# This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
|
||||
list(APPEND GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
|
||||
endif()
|
||||
|
||||
###############################################################################
|
||||
|
|
@ -359,29 +408,30 @@ add_subdirectory(examples)
|
|||
# Build timing
|
||||
add_subdirectory(timing)
|
||||
|
||||
# Build gtsam_unstable
|
||||
if (GTSAM_BUILD_UNSTABLE)
|
||||
add_subdirectory(gtsam_unstable)
|
||||
endif(GTSAM_BUILD_UNSTABLE)
|
||||
|
||||
# Matlab toolbox
|
||||
if (GTSAM_INSTALL_MATLAB_TOOLBOX)
|
||||
add_subdirectory(matlab)
|
||||
endif()
|
||||
|
||||
# Python wrap
|
||||
if (GTSAM_BUILD_PYTHON)
|
||||
include(GtsamPythonWrap)
|
||||
|
||||
# NOTE: The automatic generation of python wrapper from the gtsampy.h interface is
|
||||
# not working yet, so we're using a handwritten wrapper files on python/handwritten.
|
||||
# Once the python wrapping from the interface file is working, you can _swap_ the
|
||||
# comments on the next lines
|
||||
|
||||
# wrap_and_install_python(gtsampy.h "${GTSAM_ADDITIONAL_LIBRARIES}" "")
|
||||
add_subdirectory(python)
|
||||
|
||||
# Cython wrap
|
||||
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||
set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
|
||||
# Set up cache options
|
||||
set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
|
||||
if(NOT GTSAM_CYTHON_INSTALL_PATH)
|
||||
set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
|
||||
endif()
|
||||
set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
|
||||
add_subdirectory(cython)
|
||||
else()
|
||||
set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
|
||||
endif()
|
||||
|
||||
# Build gtsam_unstable
|
||||
if (GTSAM_BUILD_UNSTABLE)
|
||||
add_subdirectory(gtsam_unstable)
|
||||
endif(GTSAM_BUILD_UNSTABLE)
|
||||
|
||||
# Install config and export files
|
||||
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
|
||||
|
|
@ -437,12 +487,13 @@ print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts wit
|
|||
if (DOXYGEN_FOUND)
|
||||
print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ")
|
||||
endif()
|
||||
print_config_flag(${GTSAM_BUILD_STATIC_LIBRARY} "Build static GTSAM library instead of shared")
|
||||
print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ")
|
||||
print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ")
|
||||
if(GTSAM_UNSTABLE_AVAILABLE)
|
||||
print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
|
||||
endif()
|
||||
string(TOUPPER "${CMAKE_BUILD_TYPE}" cmake_build_type_toupper)
|
||||
print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
|
||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||
message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
|
||||
message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${cmake_build_type_toupper}}")
|
||||
|
|
@ -480,6 +531,15 @@ else()
|
|||
endif()
|
||||
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
|
||||
|
||||
if(NOT MSVC AND NOT XCODE_VERSION)
|
||||
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
|
||||
message(STATUS " Build with ccache : Yes")
|
||||
elseif(CCACHE_FOUND)
|
||||
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
|
||||
else()
|
||||
message(STATUS " Build with ccache : No")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
message(STATUS "Packaging flags ")
|
||||
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
|
||||
|
|
@ -491,22 +551,17 @@ print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency chec
|
|||
print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
|
||||
print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
|
||||
print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ")
|
||||
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
|
||||
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
|
||||
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
|
||||
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
|
||||
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
||||
|
||||
message(STATUS "MATLAB toolbox flags ")
|
||||
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
|
||||
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
||||
|
||||
message(STATUS "Python module flags ")
|
||||
|
||||
if(GTSAM_PYTHON_WARNINGS)
|
||||
message(STATUS " Build python module : No - dependencies missing")
|
||||
else()
|
||||
print_config_flag(${GTSAM_BUILD_PYTHON} "Build python module ")
|
||||
endif()
|
||||
if(GTSAM_BUILD_PYTHON)
|
||||
message(STATUS "Cython toolbox flags ")
|
||||
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
|
||||
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
|
||||
endif()
|
||||
message(STATUS "===============================================================")
|
||||
|
|
@ -516,13 +571,10 @@ if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
|
|||
message(WARNING "TBB was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
|
||||
endif()
|
||||
if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
|
||||
message(WARNING "MKL was not found - this is ok, but note that MKL yields better performance. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning.")
|
||||
message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
|
||||
endif()
|
||||
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
|
||||
message(WARNING "Your compiler does not support OpenMP - this is ok, but performance may be improved with OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning.")
|
||||
endif()
|
||||
if(GTSAM_BUILD_PYTHON AND GTSAM_PYTHON_WARNINGS)
|
||||
message(WARNING "${GTSAM_PYTHON_WARNINGS}")
|
||||
message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
|
||||
endif()
|
||||
|
||||
# Include CPack *after* all flags
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ file(GLOB cppunitlite_src "*.cpp")
|
|||
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
|
||||
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
|
||||
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
|
||||
target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h
|
||||
|
||||
gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -36,7 +36,7 @@ Test *Test::getNext() const
|
|||
}
|
||||
|
||||
void Test::setNext(Test *test)
|
||||
{
|
||||
{
|
||||
next_ = test;
|
||||
}
|
||||
|
||||
|
|
@ -46,9 +46,9 @@ bool Test::check(long expected, long actual, TestResult& result, const std::stri
|
|||
return true;
|
||||
result.addFailure (
|
||||
Failure (
|
||||
name_,
|
||||
name_,
|
||||
boost::lexical_cast<std::string> (__FILE__),
|
||||
__LINE__,
|
||||
__LINE__,
|
||||
boost::lexical_cast<std::string> (expected),
|
||||
boost::lexical_cast<std::string> (actual)));
|
||||
|
||||
|
|
@ -63,10 +63,10 @@ bool Test::check(const std::string& expected, const std::string& actual, TestRes
|
|||
return true;
|
||||
result.addFailure (
|
||||
Failure (
|
||||
name_,
|
||||
name_,
|
||||
boost::lexical_cast<std::string> (__FILE__),
|
||||
__LINE__,
|
||||
expected,
|
||||
__LINE__,
|
||||
expected,
|
||||
actual));
|
||||
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -12,7 +12,7 @@
|
|||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// TEST.H
|
||||
//
|
||||
//
|
||||
// This file contains the Test class along with the macros which make effective
|
||||
// in the harness.
|
||||
//
|
||||
|
|
@ -66,7 +66,7 @@ protected:
|
|||
virtual ~testGroup##testName##Test () {};\
|
||||
void run (TestResult& result_);} \
|
||||
testGroup##testName##Instance; \
|
||||
void testGroup##testName##Test::run (TestResult& result_)
|
||||
void testGroup##testName##Test::run (TestResult& result_)
|
||||
|
||||
/**
|
||||
* Declare friend in a class to test its private methods
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -12,7 +12,7 @@
|
|||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// TESTHARNESS.H
|
||||
//
|
||||
//
|
||||
// The primary include file for the framework.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -18,26 +18,26 @@
|
|||
#include "TestResult.h"
|
||||
#include "TestRegistry.h"
|
||||
|
||||
void TestRegistry::addTest (Test *test)
|
||||
void TestRegistry::addTest (Test *test)
|
||||
{
|
||||
instance ().add (test);
|
||||
}
|
||||
|
||||
|
||||
int TestRegistry::runAllTests (TestResult& result)
|
||||
int TestRegistry::runAllTests (TestResult& result)
|
||||
{
|
||||
return instance ().run (result);
|
||||
}
|
||||
|
||||
|
||||
TestRegistry& TestRegistry::instance ()
|
||||
TestRegistry& TestRegistry::instance ()
|
||||
{
|
||||
static TestRegistry registry;
|
||||
return registry;
|
||||
}
|
||||
|
||||
|
||||
void TestRegistry::add (Test *test)
|
||||
void TestRegistry::add (Test *test)
|
||||
{
|
||||
if (tests == 0) {
|
||||
test->setNext(0);
|
||||
|
|
@ -52,7 +52,7 @@ void TestRegistry::add (Test *test)
|
|||
}
|
||||
|
||||
|
||||
int TestRegistry::run (TestResult& result)
|
||||
int TestRegistry::run (TestResult& result)
|
||||
{
|
||||
result.testsStarted ();
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -12,9 +12,9 @@
|
|||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// TESTREGISTRY.H
|
||||
//
|
||||
// TestRegistry is a singleton collection of all the tests to run in a system.
|
||||
//
|
||||
//
|
||||
// TestRegistry is a singleton collection of all the tests to run in a system.
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef TESTREGISTRY_H
|
||||
|
|
@ -38,7 +38,7 @@ private:
|
|||
void add (Test *test);
|
||||
int run (TestResult& result);
|
||||
|
||||
|
||||
|
||||
Test *tests;
|
||||
Test *lastTest;
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -22,12 +22,12 @@ TestResult::TestResult ()
|
|||
}
|
||||
|
||||
|
||||
void TestResult::testsStarted ()
|
||||
void TestResult::testsStarted ()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void TestResult::addFailure (const Failure& failure)
|
||||
void TestResult::addFailure (const Failure& failure)
|
||||
{
|
||||
if (failure.lineNumber < 0) // allow for no line number
|
||||
fprintf (stdout, "%s%s%s%s\n",
|
||||
|
|
@ -48,7 +48,7 @@ void TestResult::addFailure (const Failure& failure)
|
|||
}
|
||||
|
||||
|
||||
void TestResult::testsEnded ()
|
||||
void TestResult::testsEnded ()
|
||||
{
|
||||
if (failureCount > 0)
|
||||
fprintf (stdout, "There were %d failures\n", failureCount);
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -12,10 +12,10 @@
|
|||
///////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// TESTRESULT.H
|
||||
//
|
||||
//
|
||||
// A TestResult is a collection of the history of some test runs. Right now
|
||||
// it just collects failures.
|
||||
//
|
||||
//
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef TESTRESULT_H
|
||||
|
|
@ -26,12 +26,12 @@ class Failure;
|
|||
class TestResult
|
||||
{
|
||||
public:
|
||||
TestResult ();
|
||||
TestResult ();
|
||||
virtual ~TestResult() {};
|
||||
virtual void testsStarted ();
|
||||
virtual void addFailure (const Failure& failure);
|
||||
virtual void testsEnded ();
|
||||
|
||||
|
||||
int getFailureCount() {return failureCount;}
|
||||
|
||||
private:
|
||||
|
|
|
|||
|
|
@ -97,12 +97,24 @@ Note that in the Lie group case, the usual valid expressions for Retract and Loc
|
|||
|
||||
For Lie groups, the `exponential map` above is the most obvious mapping: it
|
||||
associates straight lines in the tangent space with geodesics on the manifold
|
||||
(and it's inverse, the log map). However, there are two cases in which we deviate from this:
|
||||
(and it's inverse, the log map). However, there are several cases in which we deviate from this:
|
||||
|
||||
However, the exponential map is unnecessarily expensive for use in optimization. Hence, in GTSAM there is the option to provide a cheaper chart by means of the `ChartAtOrigin` struct in a class. This is done for *SE(2)*, *SO(3)* and *SE(3)* (see `Pose2`, `Rot3`, `Pose3`)
|
||||
|
||||
Most Lie groups we care about are *Matrix groups*, continuous sub-groups of *GL(n)*, the group of *n x n* invertible matrices. In this case, a lot of the derivatives calculations needed can be standardized, and this is done by the `LieGroup` superclass. You only need to provide an `AdjointMap` method.
|
||||
|
||||
A CRTP helper class `LieGroup` is available that can take a class and create some of the Lie group methods automatically. The class needs:
|
||||
|
||||
* operator* : implements group operator
|
||||
* inverse: implements group inverse
|
||||
* AdjointMap: maps tangent vectors according to group element
|
||||
* Expmap/Logmap: exponential map and its inverse
|
||||
* ChartAtOrigin: struct where you define Retract/Local at origin
|
||||
|
||||
To use, simply derive, but also say `using LieGroup<Class,N>::inverse` so you get an inverse with a derivative.
|
||||
|
||||
Finally, to create the traits automatically you can use `internal::LieGroupTraits<Class>`
|
||||
|
||||
Vector Space
|
||||
------------
|
||||
|
||||
|
|
|
|||
146
INSTALL
146
INSTALL
|
|
@ -1,146 +0,0 @@
|
|||
Quickstart
|
||||
|
||||
In the root library folder execute:
|
||||
|
||||
$] mkdir build
|
||||
$] cd build
|
||||
$] cmake ..
|
||||
$] make check (optional, runs unit tests)
|
||||
$] make install
|
||||
|
||||
Important Installation Notes
|
||||
----------------------------
|
||||
|
||||
1)
|
||||
GTSAM requires the following libraries to be installed on your system:
|
||||
- BOOST version 1.43 or greater (install through Linux repositories or MacPorts)
|
||||
- Cmake version 2.6 or higher
|
||||
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
|
||||
|
||||
Optional dependent libraries:
|
||||
- If TBB is installed and detectable by CMake GTSAM will use it automatically.
|
||||
Ensure that CMake prints "Use Intel TBB : Yes". To disable the use of TBB,
|
||||
disable the CMake flag GTSAM_WITH_TBB (enabled by default). On Ubuntu, TBB
|
||||
may be installed from the Ubuntu repositories, and for other platforms it
|
||||
may be downloaded from https://www.threadingbuildingblocks.org/
|
||||
|
||||
Tested compilers:
|
||||
|
||||
- GCC 4.2-4.7
|
||||
- OSX Clang 2.9-5.0
|
||||
- OSX GCC 4.2
|
||||
- MSVC 2010, 2012
|
||||
|
||||
Tested systems:
|
||||
|
||||
- Ubuntu 11.04 - 13.10
|
||||
- MacOS 10.6 - 10.9
|
||||
- Windows 7, 8, 8.1
|
||||
|
||||
Known issues:
|
||||
|
||||
- MSVC 2013 is not yet supported because it cannot build the serialization module
|
||||
of Boost 1.55 (or earlier).
|
||||
|
||||
2)
|
||||
GTSAM makes extensive use of debug assertions, and we highly recommend you work
|
||||
in Debug mode while developing (enabled by default). Likewise, it is imperative
|
||||
that you switch to release mode when running finished code and for timing. GTSAM
|
||||
will run up to 10x faster in Release mode! See the end of this document for
|
||||
additional debugging tips.
|
||||
|
||||
3)
|
||||
GTSAM has Doxygen documentation. To generate, run 'make doc' from your
|
||||
build directory.
|
||||
|
||||
4)
|
||||
The instructions below install the library to the default system install path and
|
||||
build all components. From a terminal, starting in the root library folder,
|
||||
execute commands as follows for an out-of-source build:
|
||||
|
||||
$] mkdir build
|
||||
$] cd build
|
||||
$] cmake ..
|
||||
$] make check (optional, runs unit tests)
|
||||
$] make install
|
||||
|
||||
This will build the library and unit tests, run all of the unit tests,
|
||||
and then install the library itself.
|
||||
|
||||
- CMake Configuration Options and Details
|
||||
|
||||
GTSAM has a number of options that can be configured, which is best done with
|
||||
one of the following:
|
||||
|
||||
ccmake the curses GUI for cmake
|
||||
cmake-gui a real GUI for cmake
|
||||
|
||||
Important Options:
|
||||
|
||||
CMAKE_BUILD_TYPE: We support several build configurations for GTSAM (case insensitive)
|
||||
Debug (default) All error checking options on, no optimization. Use for development.
|
||||
Release Optimizations turned on, no debug symbols.
|
||||
Timing Adds ENABLE_TIMING flag to provide statistics on operation
|
||||
Profiling Standard configuration for use during profiling
|
||||
RelWithDebInfo Same as Release, but with the -g flag for debug symbols
|
||||
|
||||
CMAKE_INSTALL_PREFIX: The install folder. The default is typically /usr/local/
|
||||
To configure to install to your home directory, you could execute:
|
||||
$] cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME ..
|
||||
|
||||
GTSAM_TOOLBOX_INSTALL_PATH: The Matlab toolbox will be installed in a subdirectory
|
||||
of this folder, called 'gtsam'.
|
||||
$] cmake -DGTSAM_TOOLBOX_INSTALL_PATH:PATH=$HOME/toolbox ..
|
||||
|
||||
GTSAM_BUILD_CONVENIENCE_LIBRARIES: This is a build option to allow for tests in
|
||||
subfolders to be linked against convenience libraries rather than the full libgtsam.
|
||||
Set with the command line as follows:
|
||||
$] cmake -DGTSAM_BUILD_CONVENIENCE_LIBRARIES:OPTION=ON ..
|
||||
ON (Default) This builds convenience libraries and links tests against them. This
|
||||
option is suggested for gtsam developers, as it is possible to build
|
||||
and run tests without first building the rest of the library, and
|
||||
speeds up compilation for a single test. The downside of this option
|
||||
is that it will build the entire library again to build the full
|
||||
libgtsam library, so build/install will be slower.
|
||||
OFF This will build all of libgtsam before any of the tests, and then
|
||||
link all of the tests at once. This option is best for users of GTSAM,
|
||||
as it avoids rebuilding the entirety of gtsam an extra time.
|
||||
|
||||
GTSAM_BUILD_UNSTABLE: Enable build and install for libgtsam_unstable library.
|
||||
Set with the command line as follows:
|
||||
$] cmake -DGTSAM_BUILD_UNSTABLE:OPTION=ON ..
|
||||
ON When enabled, libgtsam_unstable will be built and installed with the
|
||||
same options as libgtsam. In addition, if tests are enabled, the
|
||||
unit tests will be built as well. The Matlab toolbox will also
|
||||
be generated if the matlab toolbox is enabled, installing into a
|
||||
folder called "gtsam_unstable".
|
||||
OFF (Default) If disabled, no gtsam_unstable code will be included in build or install.
|
||||
|
||||
Check
|
||||
|
||||
"make check" will build and run all of the tests. Note that the tests will only be
|
||||
built when using the "check" targets, to prevent "make install" from building the tests
|
||||
unnecessarily. You can also run "make timing" to build all of the timing scripts.
|
||||
To run check on a particular module only, run "make check.[subfolder]", so to run
|
||||
just the geometry tests, run "make check.geometry". Individual tests can be run by
|
||||
appending ".run" to the name of the test, for example, to run testMatrix, run
|
||||
"make testMatrix.run".
|
||||
|
||||
MEX_COMMAND: Path to the mex compiler. Defaults to assume the path is included in your
|
||||
shell's PATH environment variable. mex is installed with matlab at
|
||||
$MATLABROOT/bin/mex
|
||||
|
||||
$MATLABROOT can be found by executing the command 'matlabroot' in MATLAB
|
||||
|
||||
Debugging tips:
|
||||
|
||||
Another useful debugging symbol is _GLIBCXX_DEBUG, which enables debug checks
|
||||
and safe containers in the standard C++ library and makes problems much easier
|
||||
to find.
|
||||
|
||||
NOTE: The native Snow Leopard g++ compiler/library contains a bug that makes
|
||||
it impossible to use _GLIBCXX_DEBUG. MacPorts g++ compilers do work with it though.
|
||||
|
||||
NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against
|
||||
gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of
|
||||
header-only Eigen.
|
||||
|
|
@ -0,0 +1,195 @@
|
|||
# Quickstart
|
||||
|
||||
In the root library folder execute:
|
||||
|
||||
```sh
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake ..
|
||||
$ make check # (optional, runs unit tests)
|
||||
$ make install
|
||||
```
|
||||
|
||||
## Important Installation Notes
|
||||
|
||||
1. GTSAM requires the following libraries to be installed on your system:
|
||||
- BOOST version 1.43 or greater (install through Linux repositories or MacPorts)
|
||||
- Cmake version 3.0 or higher
|
||||
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
|
||||
|
||||
Optional dependent libraries:
|
||||
- If TBB is installed and detectable by CMake GTSAM will use it automatically.
|
||||
Ensure that CMake prints "Use Intel TBB : Yes". To disable the use of TBB,
|
||||
disable the CMake flag GTSAM_WITH_TBB (enabled by default). On Ubuntu, TBB
|
||||
may be installed from the Ubuntu repositories, and for other platforms it
|
||||
may be downloaded from https://www.threadingbuildingblocks.org/
|
||||
- GTSAM may be configured to use MKL by toggling `GTSAM_WITH_EIGEN_MKL` and
|
||||
`GTSAM_WITH_EIGEN_MKL_OPENMP` to `ON`; however, best performance is usually
|
||||
achieved with MKL disabled. We therefore advise you to benchmark your problem
|
||||
before using MKL.
|
||||
|
||||
Tested compilers:
|
||||
|
||||
- GCC 4.2-7.3
|
||||
- OS X Clang 2.9-10.0
|
||||
- OS X GCC 4.2
|
||||
- MSVC 2010, 2012, 2017
|
||||
|
||||
Tested systems:
|
||||
|
||||
- Ubuntu 16.04 - 18.04
|
||||
- MacOS 10.6 - 10.14
|
||||
- Windows 7, 8, 8.1, 10
|
||||
|
||||
Known issues:
|
||||
|
||||
- MSVC 2013 is not yet supported because it cannot build the serialization module
|
||||
of Boost 1.55 (or earlier).
|
||||
|
||||
2. GTSAM makes extensive use of debug assertions, and we highly recommend you work
|
||||
in Debug mode while developing (enabled by default). Likewise, it is imperative
|
||||
that you switch to release mode when running finished code and for timing. GTSAM
|
||||
will run up to 10x faster in Release mode! See the end of this document for
|
||||
additional debugging tips.
|
||||
|
||||
3. GTSAM has Doxygen documentation. To generate, run 'make doc' from your
|
||||
build directory.
|
||||
|
||||
4. The instructions below install the library to the default system install path and
|
||||
build all components. From a terminal, starting in the root library folder,
|
||||
execute commands as follows for an out-of-source build:
|
||||
|
||||
```sh
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake ..
|
||||
$ make check (optional, runs unit tests)
|
||||
$ make install
|
||||
```
|
||||
|
||||
This will build the library and unit tests, run all of the unit tests,
|
||||
and then install the library itself.
|
||||
|
||||
## CMake Configuration Options and Details
|
||||
|
||||
GTSAM has a number of options that can be configured, which is best done with
|
||||
one of the following:
|
||||
|
||||
- ccmake the curses GUI for cmake
|
||||
- cmake-gui a real GUI for cmake
|
||||
|
||||
### Important Options:
|
||||
|
||||
#### CMAKE_BUILD_TYPE
|
||||
We support several build configurations for GTSAM (case insensitive)
|
||||
|
||||
```cmake -DCMAKE_BUILD_TYPE=[Option] ..```
|
||||
|
||||
- Debug (default) All error checking options on, no optimization. Use for development.
|
||||
- Release Optimizations turned on, no debug symbols.
|
||||
- Timing Adds ENABLE_TIMING flag to provide statistics on operation
|
||||
- Profiling Standard configuration for use during profiling
|
||||
- RelWithDebInfo Same as Release, but with the -g flag for debug symbols
|
||||
|
||||
#### CMAKE_INSTALL_PREFIX
|
||||
|
||||
The install folder. The default is typically `/usr/local/`.
|
||||
To configure to install to your home directory, you could execute:
|
||||
|
||||
```cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME ..```
|
||||
|
||||
#### GTSAM_TOOLBOX_INSTALL_PATH
|
||||
|
||||
The Matlab toolbox will be installed in a subdirectory
|
||||
of this folder, called 'gtsam'.
|
||||
|
||||
```cmake -DGTSAM_TOOLBOX_INSTALL_PATH:PATH=$HOME/toolbox ..```
|
||||
|
||||
#### GTSAM_BUILD_CONVENIENCE_LIBRARIES
|
||||
|
||||
This is a build option to allow for tests in subfolders to be linked against convenience libraries rather than the full libgtsam.
|
||||
Set with the command line as follows:
|
||||
|
||||
```cmake -DGTSAM_BUILD_CONVENIENCE_LIBRARIES:OPTION=ON ..```
|
||||
- ON (Default): This builds convenience libraries and links tests against them. This option is suggested for gtsam developers, as it is possible to build and run tests without first building the rest of the library, and speeds up compilation for a single test. The downside of this option is that it will build the entire library again to build the full libgtsam library, so build/install will be slower.
|
||||
- OFF: This will build all of libgtsam before any of the tests, and then link all of the tests at once. This option is best for users of GTSAM, as it avoids rebuilding the entirety of gtsam an extra time.
|
||||
|
||||
#### GTSAM_BUILD_UNSTABLE
|
||||
|
||||
Enable build and install for libgtsam_unstable library.
|
||||
Set with the command line as follows:
|
||||
|
||||
```cmake -DGTSAM_BUILD_UNSTABLE:OPTION=ON ..```
|
||||
|
||||
ON: When enabled, libgtsam_unstable will be built and installed with the same options as libgtsam. In addition, if tests are enabled, the unit tests will be built as well. The Matlab toolbox will also be generated if the matlab toolbox is enabled, installing into a folder called `gtsam_unstable`.
|
||||
OFF (Default) If disabled, no `gtsam_unstable` code will be included in build or install.
|
||||
|
||||
## Check
|
||||
|
||||
`make check` will build and run all of the tests. Note that the tests will only be
|
||||
built when using the "check" targets, to prevent `make install` from building the tests
|
||||
unnecessarily. You can also run `make timing` to build all of the timing scripts.
|
||||
To run check on a particular module only, run `make check.[subfolder]`, so to run
|
||||
just the geometry tests, run `make check.geometry`. Individual tests can be run by
|
||||
appending `.run` to the name of the test, for example, to run testMatrix, run
|
||||
`make testMatrix.run`.
|
||||
|
||||
MEX_COMMAND: Path to the mex compiler. Defaults to assume the path is included in your shell's PATH environment variable. mex is installed with matlab at `$MATLABROOT/bin/mex`
|
||||
|
||||
$MATLABROOT can be found by executing the command `matlabroot` in MATLAB
|
||||
|
||||
## Performance
|
||||
|
||||
Here are some tips to get the best possible performance out of GTSAM.
|
||||
|
||||
1. Build in `Release` mode. GTSAM will run up to 10x faster compared to `Debug` mode.
|
||||
2. Enable TBB. On modern processors with multiple cores, this can easily speed up
|
||||
optimization by 30-50%. Please note that this may not be true for very small
|
||||
problems where the overhead of dispatching work to multiple threads outweighs
|
||||
the benefit. We recommend that you benchmark your problem with/without TBB.
|
||||
3. Add `-march=native` to `GTSAM_CMAKE_CXX_FLAGS`. A performance gain of
|
||||
25-30% can be expected on modern processors. Note that this affects the portability
|
||||
of your executable. It may not run when copied to another system with older/different
|
||||
processor architecture.
|
||||
Also note that all dependent projects *must* be compiled with the same flag, or
|
||||
seg-faults and other undefined behavior may result.
|
||||
4. Possibly enable MKL. Please note that our benchmarks have shown that this helps only
|
||||
in very limited cases, and actually hurts performance in the usual case. We therefore
|
||||
recommend that you do *not* enable MKL, unless you have benchmarked it on
|
||||
your problem and have verified that it improves performance.
|
||||
|
||||
|
||||
## Debugging tips
|
||||
|
||||
Another useful debugging symbol is _GLIBCXX_DEBUG, which enables debug checks and safe containers in the standard C++ library and makes problems much easier to find.
|
||||
|
||||
NOTE: The native Snow Leopard g++ compiler/library contains a bug that makes it impossible to use _GLIBCXX_DEBUG. MacPorts g++ compilers do work with it though.
|
||||
|
||||
NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of header-only Eigen.
|
||||
|
||||
|
||||
## Installing MKL on Linux
|
||||
|
||||
Intel has a guide for installing MKL on Linux through APT repositories at <https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo>.
|
||||
|
||||
After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM):
|
||||
`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python.
|
||||
```sh
|
||||
source /opt/intel/mkl/bin/mklvars.sh intel64
|
||||
export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so"
|
||||
```
|
||||
To use MKL in GTSAM pass the flag `-DGTSAM_WITH_EIGEN_MKL=ON` to cmake.
|
||||
|
||||
|
||||
The `LD_PRELOAD` fix seems to be related to a well known problem with MKL which leads to lots of undefined symbol errors, for example:
|
||||
- <https://software.intel.com/en-us/forums/intel-math-kernel-library/topic/300857>
|
||||
- <https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628976>
|
||||
- <https://groups.google.com/a/continuum.io/forum/#!topic/anaconda/J3YGoef64z8>
|
||||
|
||||
Failing to specify `LD_PRELOAD` may lead to errors such as:
|
||||
`ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv`
|
||||
or
|
||||
`Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.`
|
||||
when importing GTSAM using the cython wrapper in python.
|
||||
|
||||
|
||||
31
LICENSE
31
LICENSE
|
|
@ -1,18 +1,25 @@
|
|||
GTSAM is released under the simplified BSD license, reproduced in the file
|
||||
LICENSE.BSD in this directory.
|
||||
|
||||
GTSAM contains two third party libraries, with documentation of licensing and
|
||||
modifications as follows:
|
||||
GTSAM contains several third party libraries, with documentation of licensing
|
||||
and modifications as follows:
|
||||
|
||||
- CCOLAMD 2.73: Tim Davis' constrained column approximate minimum degree
|
||||
- CCOLAMD 2.9.6: Tim Davis' constrained column approximate minimum degree
|
||||
ordering library
|
||||
- Included unmodified in gtsam/3rdparty/CCOLAMD and gtsam/3rdparty/UFconfig
|
||||
- http://www.cise.ufl.edu/research/sparse
|
||||
- Licenced under LGPL v2.1, provided in gtsam/3rdparty/CCOLAMD/Doc/lesser.txt
|
||||
- Eigen 3.2: General C++ matrix and linear algebra library
|
||||
- Modified with 3 patches that have been contributed back to the Eigen team:
|
||||
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection)
|
||||
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code)
|
||||
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=716 (Improved comma initialization)
|
||||
- Included unmodified in gtsam/3rdparty/CCOLAMD and
|
||||
gtsam/3rdparty/SuiteSparse_config
|
||||
- http://faculty.cse.tamu.edu/davis/suitesparse.html
|
||||
- Licenced under BSD-3, provided in gtsam/3rdparty/CCOLAMD/Doc/License.txt
|
||||
- ceres: Google's nonlinear least-squares optimization library
|
||||
- Includes only auto-diff/jet code, with minor modifications to includes
|
||||
- http://ceres-solver.org/license.html
|
||||
- Eigen 3.3.7: General C++ matrix and linear algebra library
|
||||
- Licenced under MPL2, provided in gtsam/3rdparty/Eigen/COPYING.README
|
||||
- Some code that is 3rd-party to Eigen is BSD and LGPL
|
||||
- Some code that is 3rd-party to Eigen is BSD and LGPL
|
||||
- GeographicLib 1.35: Charles Karney's geographic conversion utility library
|
||||
- Included unmodified in gtsam/3rdparty/GeographicLib
|
||||
- Licenced under MIT, provided in gtsam/3rdparty/GeographicLib/LICENSE.txt
|
||||
- METIS 5.1.0: Graph partitioning and fill-reducing matrix ordering library
|
||||
- Included unmodified in gtsam/3rdparty/metis
|
||||
- Licenced under Apache License v 2.0, provided in
|
||||
gtsam/3rdparty/metis/LICENSE.txt
|
||||
|
|
|
|||
19
README.md
19
README.md
|
|
@ -30,13 +30,15 @@ $ make install
|
|||
Prerequisites:
|
||||
|
||||
- [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`)
|
||||
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 2.6 (Ubuntu: `sudo apt-get install cmake`)
|
||||
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`)
|
||||
- A modern compiler, i.e., at least gcc 4.7.3 on Linux.
|
||||
|
||||
Optional prerequisites - used automatically if findable by CMake:
|
||||
|
||||
- [Intel Threaded Building Blocks (TBB)](http://www.threadingbuildingblocks.org/) (Ubuntu: `sudo apt-get install libtbb-dev`)
|
||||
- [Intel Math Kernel Library (MKL)](http://software.intel.com/en-us/intel-mkl)
|
||||
- [Intel Math Kernel Library (MKL)](http://software.intel.com/en-us/intel-mkl) (Ubuntu: [installing using APT](https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo))
|
||||
- See [INSTALL.md](INSTALL.md) for more installation information
|
||||
- Note that MKL may not provide a speedup in all cases. Make sure to benchmark your problem with and without MKL.
|
||||
|
||||
GTSAM 4 Compatibility
|
||||
---------------------
|
||||
|
|
@ -54,7 +56,7 @@ GTSAM includes a state of the art IMU handling scheme based on
|
|||
|
||||
Our implementation improves on this using integration on the manifold, as detailed in
|
||||
|
||||
- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, "Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors", Int. Conf. on Robotics and Automation (ICRA), 2014.
|
||||
- Luca Carlone, Zsolt Kira, Chris Beall, Vadim Indelman, and Frank Dellaert, "Eliminating conditionally independent sets in factor graphs: a unifying perspective based on smart factors", Int. Conf. on Robotics and Automation (ICRA), 2014.
|
||||
- Christian Forster, Luca Carlone, Frank Dellaert, and Davide Scaramuzza, "IMU Preintegration on Manifold for Efficient Visual-Inertial Maximum-a-Posteriori Estimation", Robotics: Science and Systems (RSS), 2015.
|
||||
|
||||
If you are using the factor in academic work, please cite the publications above.
|
||||
|
|
@ -62,16 +64,19 @@ If you are using the factor in academic work, please cite the publications above
|
|||
In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in docs/ImuFactor.pdf, is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF.
|
||||
|
||||
|
||||
|
||||
Additional Information
|
||||
----------------------
|
||||
|
||||
Read about important [`GTSAM-Concepts`](GTSAM-Concepts.md) here.
|
||||
There is a [`GTSAM users Google group`](https://groups.google.com/forum/#!forum/gtsam-users) for general discussion.
|
||||
|
||||
See the [`INSTALL`](INSTALL) file for more detailed installation instructions.
|
||||
Read about important [`GTSAM-Concepts`](GTSAM-Concepts.md) here. A primer on GTSAM Expressions,
|
||||
which support (superfast) automatic differentiation,
|
||||
can be found on the [GTSAM wiki on BitBucket](https://bitbucket.org/gtborg/gtsam/wiki/Home).
|
||||
|
||||
See the [`INSTALL`](INSTALL.md) file for more detailed installation instructions.
|
||||
|
||||
GTSAM is open source under the BSD license, see the [`LICENSE`](LICENSE) and [`LICENSE.BSD`](LICENSE.BSD) files.
|
||||
|
||||
Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM.
|
||||
|
||||
GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS).
|
||||
GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS).
|
||||
|
|
|
|||
2
THANKS
2
THANKS
|
|
@ -1,5 +1,6 @@
|
|||
GTSAM was made possible by the efforts of many collaborators at Georgia Tech, listed below with their current afffiliation, if they left Tech:
|
||||
|
||||
* Jeremy Aguilon, Facebook
|
||||
* Sungtae An
|
||||
* Doru Balcan, Bank of America
|
||||
* Chris Beall
|
||||
|
|
@ -26,6 +27,7 @@ GTSAM was made possible by the efforts of many collaborators at Georgia Tech, li
|
|||
* Natesh Srinivasan
|
||||
* Alex Trevor
|
||||
* Stephen Williams, BossaNova
|
||||
* Matthew Broadway
|
||||
|
||||
at ETH, Zurich
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
# Built from sample configuration for C++ – Make.
|
||||
# Check https://confluence.atlassian.com/x/5Q4SMw for more examples.
|
||||
# -----
|
||||
# Our custom docker image from Docker Hub as the build environment.
|
||||
image: dellaert/ubuntu-boost-tbb-eigen3:bionic
|
||||
|
||||
pipelines:
|
||||
default:
|
||||
- step:
|
||||
script: # Modify the commands below to build your repository.
|
||||
- mkdir build
|
||||
- cd build
|
||||
- cmake -DGTSAM_USE_SYSTEM_EIGEN=OFF -DGTSAM_USE_EIGEN_MKL=OFF ..
|
||||
- make -j2
|
||||
- make -j2 check
|
||||
|
|
@ -18,7 +18,10 @@ install(FILES
|
|||
GtsamMakeConfigFile.cmake
|
||||
GtsamMatlabWrap.cmake
|
||||
GtsamPythonWrap.cmake
|
||||
GtsamCythonWrap.cmake
|
||||
GtsamTesting.cmake
|
||||
FindCython.cmake
|
||||
FindNumPy.cmake
|
||||
README.html
|
||||
DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools")
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# - Config file for @CMAKE_PROJECT_NAME@
|
||||
# It defines the following variables
|
||||
# @PACKAGE_NAME@_INCLUDE_DIR - include directories for @CMAKE_PROJECT_NAME@
|
||||
|
||||
|
||||
# Compute paths
|
||||
get_filename_component(OUR_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
|
||||
if(EXISTS "${OUR_CMAKE_DIR}/CMakeCache.txt")
|
||||
|
|
@ -11,7 +11,11 @@ else()
|
|||
# Find installed library
|
||||
set(@PACKAGE_NAME@_INCLUDE_DIR "${OUR_CMAKE_DIR}/@CONF_REL_INCLUDE_DIR@" CACHE PATH "@PACKAGE_NAME@ include directory")
|
||||
endif()
|
||||
|
||||
|
||||
# Find dependencies, required by cmake exported targets:
|
||||
include(CMakeFindDependencyMacro)
|
||||
find_dependency(Boost @BOOST_FIND_MINIMUM_VERSION@ COMPONENTS @BOOST_FIND_MINIMUM_COMPONENTS@)
|
||||
|
||||
# Load exports
|
||||
include(${OUR_CMAKE_DIR}/@PACKAGE_NAME@-exports.cmake)
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,81 @@
|
|||
# Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake
|
||||
#
|
||||
# Find the Cython compiler.
|
||||
#
|
||||
# This code sets the following variables:
|
||||
#
|
||||
# CYTHON_FOUND
|
||||
# CYTHON_PATH
|
||||
# CYTHON_EXECUTABLE
|
||||
# CYTHON_VERSION
|
||||
#
|
||||
# See also UseCython.cmake
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2011 Kitware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#=============================================================================
|
||||
|
||||
# Use the Cython executable that lives next to the Python executable
|
||||
# if it is a local installation.
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
|
||||
endif()
|
||||
|
||||
if ( PYTHONINTERP_FOUND )
|
||||
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"import Cython; print(Cython.__path__[0])"
|
||||
RESULT_VARIABLE RESULT
|
||||
OUTPUT_VARIABLE CYTHON_PATH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
endif ()
|
||||
|
||||
# RESULT=0 means ok
|
||||
if ( NOT RESULT )
|
||||
get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH )
|
||||
find_program( CYTHON_EXECUTABLE
|
||||
NAMES cython cython.bat cython3
|
||||
HINTS ${_python_path}
|
||||
)
|
||||
endif ()
|
||||
|
||||
# RESULT=0 means ok
|
||||
if ( NOT RESULT )
|
||||
execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"import Cython; print(Cython.__version__)"
|
||||
RESULT_VARIABLE RESULT
|
||||
OUTPUT_VARIABLE CYTHON_VAR_OUTPUT
|
||||
ERROR_VARIABLE CYTHON_VAR_OUTPUT
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
if ( RESULT EQUAL 0 )
|
||||
string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1"
|
||||
CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" )
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
include( FindPackageHandleStandardArgs )
|
||||
find_package_handle_standard_args( Cython
|
||||
FOUND_VAR
|
||||
CYTHON_FOUND
|
||||
REQUIRED_VARS
|
||||
CYTHON_PATH
|
||||
CYTHON_EXECUTABLE
|
||||
VERSION_VAR
|
||||
CYTHON_VERSION
|
||||
)
|
||||
|
||||
|
|
@ -83,10 +83,20 @@ FIND_PATH(MKL_FFTW_INCLUDE_DIR
|
|||
NO_DEFAULT_PATH
|
||||
)
|
||||
|
||||
IF(WIN32)
|
||||
IF(WIN32 AND MKL_ROOT_DIR)
|
||||
SET(MKL_LIB_SEARCHPATH $ENV{ICC_LIB_DIR} $ENV{MKL_LIB_DIR} "${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}" "${MKL_ROOT_DIR}/../compiler" "${MKL_ROOT_DIR}/../compiler/lib/${MKL_ARCH_DIR}")
|
||||
|
||||
IF (MKL_INCLUDE_DIR MATCHES "10.")
|
||||
IF(MKL_INCLUDE_DIR MATCHES "2017" OR MKL_INCLUDE_DIR MATCHES "2018")
|
||||
IF(CMAKE_CL_64)
|
||||
SET(MKL_LIBS mkl_core mkl_intel_lp64 mkl_lapack95_lp64 mkl_blas95_lp64)
|
||||
ELSE()
|
||||
SET(MKL_LIBS mkl_core mkl_intel_s mkl_lapack95 mkl_blas95)
|
||||
ENDIF()
|
||||
IF(TBB_FOUND AND GTSAM_WITH_TBB)
|
||||
SET(MKL_LIBS ${MKL_LIBS} mkl_tbb_thread)
|
||||
ELSE()
|
||||
SET(MKL_LIBS ${MKL_LIBS} mkl_intel_thread libiomp5md)
|
||||
ENDIF()
|
||||
ELSEIF(MKL_INCLUDE_DIR MATCHES "10.")
|
||||
IF(CMAKE_CL_64)
|
||||
SET(MKL_LIBS mkl_solver_lp64 mkl_core mkl_intel_lp64 mkl_intel_thread libguide mkl_lapack95_lp64 mkl_blas95_lp64)
|
||||
ELSE()
|
||||
|
|
@ -115,7 +125,7 @@ IF(WIN32)
|
|||
ENDIF()
|
||||
ENDFOREACH()
|
||||
SET(MKL_FOUND ON)
|
||||
ELSE() # UNIX and macOS
|
||||
ELSEIF(MKL_ROOT_DIR) # UNIX and macOS
|
||||
FIND_LIBRARY(MKL_CORE_LIBRARY
|
||||
mkl_core
|
||||
PATHS
|
||||
|
|
@ -196,6 +206,15 @@ ELSE() # UNIX and macOS
|
|||
)
|
||||
ENDIF()
|
||||
|
||||
IF(NOT MKL_LAPACK_LIBRARY)
|
||||
FIND_LIBRARY(MKL_LAPACK_LIBRARY
|
||||
mkl_intel_lp64
|
||||
PATHS
|
||||
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
|
||||
${MKL_ROOT_DIR}/lib/
|
||||
)
|
||||
ENDIF()
|
||||
|
||||
# iomp5
|
||||
IF("${MKL_ARCH_DIR}" STREQUAL "32")
|
||||
IF(UNIX AND NOT APPLE)
|
||||
|
|
|
|||
|
|
@ -40,9 +40,17 @@
|
|||
|
||||
# Finding NumPy involves calling the Python interpreter
|
||||
if(NumPy_FIND_REQUIRED)
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
endif()
|
||||
else()
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT PYTHONINTERP_FOUND)
|
||||
|
|
|
|||
|
|
@ -1,13 +1,6 @@
|
|||
# Locate Intel Threading Building Blocks include paths and libraries
|
||||
# FindTBB.cmake can be found at https://code.google.com/p/findtbb/
|
||||
# Written by Hannes Hofmann <hannes.hofmann _at_ informatik.uni-erlangen.de>
|
||||
# Improvements by Gino van den Bergen <gino _at_ dtecta.com>,
|
||||
# Florian Uhlig <F.Uhlig _at_ gsi.de>,
|
||||
# Jiri Marsik <jiri.marsik89 _at_ gmail.com>
|
||||
|
||||
# The MIT License
|
||||
# The MIT License (MIT)
|
||||
#
|
||||
# Copyright (c) 2011 Hannes Hofmann
|
||||
# Copyright (c) 2015 Justus Calvin
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -16,291 +9,306 @@
|
|||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
# THE SOFTWARE.
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
# GvdB: This module uses the environment variable TBB_ARCH_PLATFORM which defines architecture and compiler.
|
||||
# e.g. "ia32/vc8" or "em64t/cc4.1.0_libc2.4_kernel2.6.16.21"
|
||||
# TBB_ARCH_PLATFORM is set by the build script tbbvars[.bat|.sh|.csh], which can be found
|
||||
# in the TBB installation directory (TBB_INSTALL_DIR).
|
||||
#
|
||||
# GvdB: Mac OS X distribution places libraries directly in lib directory.
|
||||
# FindTBB
|
||||
# -------
|
||||
#
|
||||
# For backwards compatibility, you may explicitely set the CMake variables TBB_ARCHITECTURE and TBB_COMPILER.
|
||||
# TBB_ARCHITECTURE [ ia32 | em64t | itanium ]
|
||||
# which architecture to use
|
||||
# TBB_COMPILER e.g. vc9 or cc3.2.3_libc2.3.2_kernel2.4.21 or cc4.0.1_os10.4.9
|
||||
# which compiler to use (detected automatically on Windows)
|
||||
# Find TBB include directories and libraries.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# find_package(TBB [major[.minor]] [EXACT]
|
||||
# [QUIET] [REQUIRED]
|
||||
# [[COMPONENTS] [components...]]
|
||||
# [OPTIONAL_COMPONENTS components...])
|
||||
#
|
||||
# where the allowed components are tbbmalloc and tbb_preview. Users may modify
|
||||
# the behavior of this module with the following variables:
|
||||
#
|
||||
# * TBB_ROOT_DIR - The base directory the of TBB installation.
|
||||
# * TBB_INCLUDE_DIR - The directory that contains the TBB headers files.
|
||||
# * TBB_LIBRARY - The directory that contains the TBB library files.
|
||||
# * TBB_<library>_LIBRARY - The path of the TBB the corresponding TBB library.
|
||||
# These libraries, if specified, override the
|
||||
# corresponding library search results, where <library>
|
||||
# may be tbb, tbb_debug, tbbmalloc, tbbmalloc_debug,
|
||||
# tbb_preview, or tbb_preview_debug.
|
||||
# * TBB_USE_DEBUG_BUILD - The debug version of tbb libraries, if present, will
|
||||
# be used instead of the release version.
|
||||
#
|
||||
# Users may modify the behavior of this module with the following environment
|
||||
# variables:
|
||||
#
|
||||
# * TBB_INSTALL_DIR
|
||||
# * TBBROOT
|
||||
# * LIBRARY_PATH
|
||||
#
|
||||
# This module will set the following variables:
|
||||
#
|
||||
# * TBB_FOUND - Set to false, or undefined, if we haven’t found, or
|
||||
# don’t want to use TBB.
|
||||
# * TBB_<component>_FOUND - If False, optional <component> part of TBB sytem is
|
||||
# not available.
|
||||
# * TBB_VERSION - The full version string
|
||||
# * TBB_VERSION_MAJOR - The major version
|
||||
# * TBB_VERSION_MINOR - The minor version
|
||||
# * TBB_INTERFACE_VERSION - The interface version number defined in
|
||||
# tbb/tbb_stddef.h.
|
||||
# * TBB_<library>_LIBRARY_RELEASE - The path of the TBB release version of
|
||||
# <library>, where <library> may be tbb, tbb_debug,
|
||||
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
|
||||
# tbb_preview_debug.
|
||||
# * TBB_<library>_LIBRARY_DEGUG - The path of the TBB release version of
|
||||
# <library>, where <library> may be tbb, tbb_debug,
|
||||
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
|
||||
# tbb_preview_debug.
|
||||
#
|
||||
# The following varibles should be used to build and link with TBB:
|
||||
#
|
||||
# * TBB_INCLUDE_DIRS - The include directory for TBB.
|
||||
# * TBB_LIBRARIES - The libraries to link against to use TBB.
|
||||
# * TBB_LIBRARIES_RELEASE - The release libraries to link against to use TBB.
|
||||
# * TBB_LIBRARIES_DEBUG - The debug libraries to link against to use TBB.
|
||||
# * TBB_DEFINITIONS - Definitions to use when compiling code that uses
|
||||
# TBB.
|
||||
# * TBB_DEFINITIONS_RELEASE - Definitions to use when compiling release code that
|
||||
# uses TBB.
|
||||
# * TBB_DEFINITIONS_DEBUG - Definitions to use when compiling debug code that
|
||||
# uses TBB.
|
||||
#
|
||||
# This module will also create the "tbb" target that may be used when building
|
||||
# executables and libraries.
|
||||
|
||||
# This module respects
|
||||
# TBB_INSTALL_DIR or $ENV{TBB21_INSTALL_DIR} or $ENV{TBB_INSTALL_DIR}
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
# This module defines
|
||||
# TBB_INCLUDE_DIRS, where to find task_scheduler_init.h, etc.
|
||||
# TBB_LIBRARY_DIRS, where to find libtbb, libtbbmalloc
|
||||
# TBB_DEBUG_LIBRARY_DIRS, where to find libtbb_debug, libtbbmalloc_debug
|
||||
# TBB_INSTALL_DIR, the base TBB install directory
|
||||
# TBB_LIBRARIES, the libraries to link against to use TBB.
|
||||
# TBB_DEBUG_LIBRARIES, the libraries to link against to use TBB with debug symbols.
|
||||
# TBB_FOUND, If false, don't try to use TBB.
|
||||
# TBB_INTERFACE_VERSION, as defined in tbb/tbb_stddef.h
|
||||
if(NOT TBB_FOUND)
|
||||
|
||||
##################################
|
||||
# Check the build type
|
||||
##################################
|
||||
|
||||
if (WIN32)
|
||||
# has em64t/vc8 em64t/vc9
|
||||
# has ia32/vc7.1 ia32/vc8 ia32/vc9
|
||||
set(_TBB_DEFAULT_INSTALL_DIR "C:/Program Files/Intel/TBB")
|
||||
set(_TBB_LIB_NAME "tbb")
|
||||
set(_TBB_LIB_MALLOC_NAME "${_TBB_LIB_NAME}malloc")
|
||||
set(_TBB_LIB_DEBUG_NAME "${_TBB_LIB_NAME}_debug")
|
||||
set(_TBB_LIB_MALLOC_DEBUG_NAME "${_TBB_LIB_MALLOC_NAME}_debug")
|
||||
if (MSVC71)
|
||||
set (_TBB_COMPILER "vc7.1")
|
||||
set (TBB_COMPILER "vc7.1")
|
||||
endif(MSVC71)
|
||||
if (MSVC80)
|
||||
set(_TBB_COMPILER "vc8")
|
||||
set(TBB_COMPILER "vc8")
|
||||
endif(MSVC80)
|
||||
if (MSVC90)
|
||||
set(_TBB_COMPILER "vc9")
|
||||
set(TBB_COMPILER "vc9")
|
||||
endif(MSVC90)
|
||||
if(MSVC10)
|
||||
set(_TBB_COMPILER "vc10")
|
||||
set(TBB_COMPILER "vc10")
|
||||
endif(MSVC10)
|
||||
if(MSVC11)
|
||||
set(_TBB_COMPILER "vc11")
|
||||
set(TBB_COMPILER "vc11")
|
||||
endif(MSVC11)
|
||||
# Todo: add other Windows compilers such as ICL.
|
||||
if(TBB_ARCHITECTURE)
|
||||
set(_TBB_ARCHITECTURE ${TBB_ARCHITECTURE})
|
||||
elseif("$ENV{TBB_ARCH_PLATFORM}" STREQUAL "")
|
||||
# Try to guess the architecture
|
||||
if(CMAKE_CL_64)
|
||||
set(_TBB_ARCHITECTURE intel64)
|
||||
set(TBB_ARCHITECTURE intel64)
|
||||
else()
|
||||
set(_TBB_ARCHITECTURE ia32)
|
||||
set(TBB_ARCHITECTURE ia32)
|
||||
endif()
|
||||
endif()
|
||||
endif (WIN32)
|
||||
if(NOT DEFINED TBB_USE_DEBUG_BUILD)
|
||||
if(CMAKE_BUILD_TYPE MATCHES "(Debug|DEBUG|debug|RelWithDebInfo|RELWITHDEBINFO|relwithdebinfo)")
|
||||
set(TBB_BUILD_TYPE DEBUG)
|
||||
else()
|
||||
set(TBB_BUILD_TYPE RELEASE)
|
||||
endif()
|
||||
elseif(TBB_USE_DEBUG_BUILD)
|
||||
set(TBB_BUILD_TYPE DEBUG)
|
||||
else()
|
||||
set(TBB_BUILD_TYPE RELEASE)
|
||||
endif()
|
||||
|
||||
if (UNIX)
|
||||
if (APPLE)
|
||||
# MAC
|
||||
set(_TBB_DEFAULT_INSTALL_DIR "/Library/Frameworks/Intel_TBB.framework/Versions")
|
||||
# libs: libtbb.dylib, libtbbmalloc.dylib, *_debug
|
||||
set(_TBB_LIB_NAME "tbb")
|
||||
set(_TBB_LIB_MALLOC_NAME "${_TBB_LIB_NAME}malloc")
|
||||
set(_TBB_LIB_DEBUG_NAME "${_TBB_LIB_NAME}_debug")
|
||||
set(_TBB_LIB_MALLOC_DEBUG_NAME "${_TBB_LIB_MALLOC_NAME}_debug")
|
||||
# default flavor on apple: ia32/cc4.0.1_os10.4.9
|
||||
# Jiri: There is no reason to presume there is only one flavor and
|
||||
# that user's setting of variables should be ignored.
|
||||
if(NOT TBB_COMPILER)
|
||||
set(_TBB_COMPILER "cc4.0.1_os10.4.9")
|
||||
elseif (NOT TBB_COMPILER)
|
||||
set(_TBB_COMPILER ${TBB_COMPILER})
|
||||
endif(NOT TBB_COMPILER)
|
||||
if(NOT TBB_ARCHITECTURE)
|
||||
set(_TBB_ARCHITECTURE "ia32")
|
||||
elseif(NOT TBB_ARCHITECTURE)
|
||||
set(_TBB_ARCHITECTURE ${TBB_ARCHITECTURE})
|
||||
endif(NOT TBB_ARCHITECTURE)
|
||||
else (APPLE)
|
||||
# LINUX
|
||||
set(_TBB_DEFAULT_INSTALL_DIR "/opt/intel/tbb" "/usr/local/include" "/usr/include")
|
||||
set(_TBB_LIB_NAME "tbb")
|
||||
set(_TBB_LIB_MALLOC_NAME "${_TBB_LIB_NAME}malloc")
|
||||
set(_TBB_LIB_DEBUG_NAME "${_TBB_LIB_NAME}_debug")
|
||||
set(_TBB_LIB_MALLOC_DEBUG_NAME "${_TBB_LIB_MALLOC_NAME}_debug")
|
||||
# has em64t/cc3.2.3_libc2.3.2_kernel2.4.21 em64t/cc3.3.3_libc2.3.3_kernel2.6.5 em64t/cc3.4.3_libc2.3.4_kernel2.6.9 em64t/cc4.1.0_libc2.4_kernel2.6.16.21
|
||||
# has ia32/*
|
||||
# has itanium/*
|
||||
set(_TBB_COMPILER ${TBB_COMPILER})
|
||||
set(_TBB_ARCHITECTURE ${TBB_ARCHITECTURE})
|
||||
endif (APPLE)
|
||||
endif (UNIX)
|
||||
##################################
|
||||
# Set the TBB search directories
|
||||
##################################
|
||||
|
||||
if (CMAKE_SYSTEM MATCHES "SunOS.*")
|
||||
# SUN
|
||||
# not yet supported
|
||||
# has em64t/cc3.4.3_kernel5.10
|
||||
# has ia32/*
|
||||
endif (CMAKE_SYSTEM MATCHES "SunOS.*")
|
||||
# Define search paths based on user input and environment variables
|
||||
set(TBB_SEARCH_DIR ${TBB_ROOT_DIR} $ENV{TBB_INSTALL_DIR} $ENV{TBBROOT})
|
||||
|
||||
# Define the search directories based on the current platform
|
||||
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
|
||||
set(TBB_DEFAULT_SEARCH_DIR "C:/Program Files/Intel/TBB"
|
||||
"C:/Program Files (x86)/Intel/TBB")
|
||||
|
||||
#-- Clear the public variables
|
||||
set (TBB_FOUND "NO")
|
||||
# Set the target architecture
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
set(TBB_ARCHITECTURE "intel64")
|
||||
else()
|
||||
set(TBB_ARCHITECTURE "ia32")
|
||||
endif()
|
||||
|
||||
# Set the TBB search library path search suffix based on the version of VC
|
||||
if(WINDOWS_STORE)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11_ui")
|
||||
elseif(MSVC14)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc14")
|
||||
elseif(MSVC12)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc12")
|
||||
elseif(MSVC11)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11")
|
||||
elseif(MSVC10)
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc10")
|
||||
endif()
|
||||
|
||||
#-- Find TBB install dir and set ${_TBB_INSTALL_DIR} and cached ${TBB_INSTALL_DIR}
|
||||
# first: use CMake variable TBB_INSTALL_DIR
|
||||
if (TBB_INSTALL_DIR)
|
||||
set (_TBB_INSTALL_DIR ${TBB_INSTALL_DIR})
|
||||
endif (TBB_INSTALL_DIR)
|
||||
# second: use environment variable
|
||||
if (NOT _TBB_INSTALL_DIR)
|
||||
if (NOT "$ENV{TBB_INSTALL_DIR}" STREQUAL "")
|
||||
set (_TBB_INSTALL_DIR $ENV{TBB_INSTALL_DIR})
|
||||
endif (NOT "$ENV{TBB_INSTALL_DIR}" STREQUAL "")
|
||||
# Intel recommends setting TBB21_INSTALL_DIR
|
||||
if (NOT "$ENV{TBB21_INSTALL_DIR}" STREQUAL "")
|
||||
set (_TBB_INSTALL_DIR $ENV{TBB21_INSTALL_DIR})
|
||||
endif (NOT "$ENV{TBB21_INSTALL_DIR}" STREQUAL "")
|
||||
if (NOT "$ENV{TBB22_INSTALL_DIR}" STREQUAL "")
|
||||
set (_TBB_INSTALL_DIR $ENV{TBB22_INSTALL_DIR})
|
||||
endif (NOT "$ENV{TBB22_INSTALL_DIR}" STREQUAL "")
|
||||
if (NOT "$ENV{TBB30_INSTALL_DIR}" STREQUAL "")
|
||||
set (_TBB_INSTALL_DIR $ENV{TBB30_INSTALL_DIR})
|
||||
endif (NOT "$ENV{TBB30_INSTALL_DIR}" STREQUAL "")
|
||||
endif (NOT _TBB_INSTALL_DIR)
|
||||
# third: try to find path automatically
|
||||
if (NOT _TBB_INSTALL_DIR)
|
||||
if (_TBB_DEFAULT_INSTALL_DIR)
|
||||
set (_TBB_INSTALL_DIR ${_TBB_DEFAULT_INSTALL_DIR})
|
||||
endif (_TBB_DEFAULT_INSTALL_DIR)
|
||||
endif (NOT _TBB_INSTALL_DIR)
|
||||
# sanity check
|
||||
if (NOT _TBB_INSTALL_DIR)
|
||||
message (STATUS "TBB: Unable to find Intel TBB install directory. ${_TBB_INSTALL_DIR}")
|
||||
else (NOT _TBB_INSTALL_DIR)
|
||||
# finally: set the cached CMake variable TBB_INSTALL_DIR
|
||||
if (NOT TBB_INSTALL_DIR)
|
||||
set (TBB_INSTALL_DIR ${_TBB_INSTALL_DIR} CACHE PATH "Intel TBB install directory")
|
||||
mark_as_advanced(TBB_INSTALL_DIR)
|
||||
endif (NOT TBB_INSTALL_DIR)
|
||||
# Add the library path search suffix for the VC independent version of TBB
|
||||
list(APPEND TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc_mt")
|
||||
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
|
||||
# OS X
|
||||
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
|
||||
|
||||
#-- A macro to rewrite the paths of the library. This is necessary, because
|
||||
# find_library() always found the em64t/vc9 version of the TBB libs
|
||||
macro(TBB_CORRECT_LIB_DIR var_name)
|
||||
# if (NOT "${_TBB_ARCHITECTURE}" STREQUAL "em64t")
|
||||
string(REPLACE em64t "${_TBB_ARCHITECTURE}" ${var_name} ${${var_name}})
|
||||
# endif (NOT "${_TBB_ARCHITECTURE}" STREQUAL "em64t")
|
||||
string(REPLACE ia32 "${_TBB_ARCHITECTURE}" ${var_name} ${${var_name}})
|
||||
string(REPLACE vc7.1 "${_TBB_COMPILER}" ${var_name} ${${var_name}})
|
||||
string(REPLACE vc8 "${_TBB_COMPILER}" ${var_name} ${${var_name}})
|
||||
string(REPLACE vc9 "${_TBB_COMPILER}" ${var_name} ${${var_name}})
|
||||
string(REPLACE vc10 "${_TBB_COMPILER}" ${var_name} ${${var_name}})
|
||||
string(REPLACE vc11 "${_TBB_COMPILER}" ${var_name} ${${var_name}})
|
||||
endmacro(TBB_CORRECT_LIB_DIR var_content)
|
||||
# TODO: Check to see which C++ library is being used by the compiler.
|
||||
if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)
|
||||
# The default C++ library on OS X 10.9 and later is libc++
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/libc++" "lib")
|
||||
else()
|
||||
set(TBB_LIB_PATH_SUFFIX "lib")
|
||||
endif()
|
||||
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
# Linux
|
||||
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
|
||||
|
||||
# TODO: Check compiler version to see the suffix should be <arch>/gcc4.1 or
|
||||
# <arch>/gcc4.1. For now, assume that the compiler is more recent than
|
||||
# gcc 4.4.x or later.
|
||||
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/intel64/gcc4.4")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$")
|
||||
set(TBB_LIB_PATH_SUFFIX "lib/ia32/gcc4.4")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
#-- Look for include directory and set ${TBB_INCLUDE_DIR}
|
||||
set (TBB_INC_SEARCH_DIR ${_TBB_INSTALL_DIR}/include)
|
||||
# Jiri: tbbvars now sets the CPATH environment variable to the directory
|
||||
# containing the headers.
|
||||
find_path(TBB_INCLUDE_DIR
|
||||
tbb/task_scheduler_init.h
|
||||
PATHS ${TBB_INC_SEARCH_DIR} ENV CPATH
|
||||
)
|
||||
mark_as_advanced(TBB_INCLUDE_DIR)
|
||||
##################################
|
||||
# Find the TBB include dir
|
||||
##################################
|
||||
|
||||
find_path(TBB_INCLUDE_DIRS tbb/tbb.h
|
||||
HINTS ${TBB_INCLUDE_DIR} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR}
|
||||
PATH_SUFFIXES include)
|
||||
|
||||
#-- Look for libraries
|
||||
# GvdB: $ENV{TBB_ARCH_PLATFORM} is set by the build script tbbvars[.bat|.sh|.csh]
|
||||
if (NOT $ENV{TBB_ARCH_PLATFORM} STREQUAL "")
|
||||
set (_TBB_LIBRARY_DIR
|
||||
${_TBB_INSTALL_DIR}/lib/$ENV{TBB_ARCH_PLATFORM}
|
||||
${_TBB_INSTALL_DIR}/$ENV{TBB_ARCH_PLATFORM}/lib
|
||||
)
|
||||
endif (NOT $ENV{TBB_ARCH_PLATFORM} STREQUAL "")
|
||||
# Jiri: This block isn't mutually exclusive with the previous one
|
||||
# (hence no else), instead I test if the user really specified
|
||||
# the variables in question.
|
||||
if ((NOT ${TBB_ARCHITECTURE} STREQUAL "") AND (NOT ${TBB_COMPILER} STREQUAL ""))
|
||||
# HH: deprecated
|
||||
message(STATUS "[Warning] FindTBB.cmake: The use of TBB_ARCHITECTURE and TBB_COMPILER is deprecated and may not be supported in future versions. Please set \$ENV{TBB_ARCH_PLATFORM} (using tbbvars.[bat|csh|sh]).")
|
||||
# Jiri: It doesn't hurt to look in more places, so I store the hints from
|
||||
# ENV{TBB_ARCH_PLATFORM} and the TBB_ARCHITECTURE and TBB_COMPILER
|
||||
# variables and search them both.
|
||||
set (
|
||||
_TBB_LIBRARY_DIR "${_TBB_INSTALL_DIR}/${_TBB_ARCHITECTURE}/${_TBB_COMPILER}/lib" ${_TBB_LIBRARY_DIR}
|
||||
_TBB_LIBRARY_DIR "${_TBB_INSTALL_DIR}/lib/${_TBB_ARCHITECTURE}/${_TBB_COMPILER}" ${_TBB_LIBRARY_DIR}
|
||||
)
|
||||
endif ((NOT ${TBB_ARCHITECTURE} STREQUAL "") AND (NOT ${TBB_COMPILER} STREQUAL ""))
|
||||
##################################
|
||||
# Set version strings
|
||||
##################################
|
||||
|
||||
# GvdB: Mac OS X distribution places libraries directly in lib directory.
|
||||
list(APPEND _TBB_LIBRARY_DIR ${_TBB_INSTALL_DIR}/lib)
|
||||
if(TBB_INCLUDE_DIRS)
|
||||
file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file)
|
||||
string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
|
||||
TBB_VERSION_MAJOR "${_tbb_version_file}")
|
||||
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
|
||||
TBB_VERSION_MINOR "${_tbb_version_file}")
|
||||
string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1"
|
||||
TBB_INTERFACE_VERSION "${_tbb_version_file}")
|
||||
set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}")
|
||||
endif()
|
||||
|
||||
# Jiri: No reason not to check the default paths. From recent versions,
|
||||
# tbbvars has started exporting the LIBRARY_PATH and LD_LIBRARY_PATH
|
||||
# variables, which now point to the directories of the lib files.
|
||||
# It all makes more sense to use the ${_TBB_LIBRARY_DIR} as a HINTS
|
||||
# argument instead of the implicit PATHS as it isn't hard-coded
|
||||
# but computed by system introspection. Searching the LIBRARY_PATH
|
||||
# and LD_LIBRARY_PATH environment variables is now even more important
|
||||
# that tbbvars doesn't export TBB_ARCH_PLATFORM and it facilitates
|
||||
# the use of TBB built from sources.
|
||||
find_library(TBB_LIBRARY ${_TBB_LIB_NAME} HINTS ${_TBB_LIBRARY_DIR}
|
||||
PATHS ENV LIBRARY_PATH ENV LD_LIBRARY_PATH)
|
||||
find_library(TBB_MALLOC_LIBRARY ${_TBB_LIB_MALLOC_NAME} HINTS ${_TBB_LIBRARY_DIR}
|
||||
PATHS ENV LIBRARY_PATH ENV LD_LIBRARY_PATH)
|
||||
##################################
|
||||
# Find TBB components
|
||||
##################################
|
||||
|
||||
#Extract path from TBB_LIBRARY name
|
||||
get_filename_component(TBB_LIBRARY_DIR ${TBB_LIBRARY} PATH)
|
||||
if(TBB_VERSION VERSION_LESS 4.3)
|
||||
set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc tbb)
|
||||
else()
|
||||
set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc_proxy tbbmalloc tbb)
|
||||
endif()
|
||||
|
||||
#TBB_CORRECT_LIB_DIR(TBB_LIBRARY)
|
||||
#TBB_CORRECT_LIB_DIR(TBB_MALLOC_LIBRARY)
|
||||
mark_as_advanced(TBB_LIBRARY TBB_MALLOC_LIBRARY)
|
||||
# Find each component
|
||||
foreach(_comp ${TBB_SEARCH_COMPOMPONENTS})
|
||||
if(";${TBB_FIND_COMPONENTS};tbb;" MATCHES ";${_comp};")
|
||||
|
||||
#-- Look for debug libraries
|
||||
# Jiri: Changed the same way as for the release libraries.
|
||||
find_library(TBB_LIBRARY_DEBUG ${_TBB_LIB_DEBUG_NAME} HINTS ${_TBB_LIBRARY_DIR}
|
||||
PATHS ENV LIBRARY_PATH ENV LD_LIBRARY_PATH)
|
||||
find_library(TBB_MALLOC_LIBRARY_DEBUG ${_TBB_LIB_MALLOC_DEBUG_NAME} HINTS ${_TBB_LIBRARY_DIR}
|
||||
PATHS ENV LIBRARY_PATH ENV LD_LIBRARY_PATH)
|
||||
# Search for the libraries
|
||||
find_library(TBB_${_comp}_LIBRARY_RELEASE ${_comp}
|
||||
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
|
||||
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
|
||||
|
||||
# Jiri: Self-built TBB stores the debug libraries in a separate directory.
|
||||
# Extract path from TBB_LIBRARY_DEBUG name
|
||||
get_filename_component(TBB_LIBRARY_DEBUG_DIR ${TBB_LIBRARY_DEBUG} PATH)
|
||||
find_library(TBB_${_comp}_LIBRARY_DEBUG ${_comp}_debug
|
||||
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
|
||||
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
|
||||
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
|
||||
|
||||
#TBB_CORRECT_LIB_DIR(TBB_LIBRARY_DEBUG)
|
||||
#TBB_CORRECT_LIB_DIR(TBB_MALLOC_LIBRARY_DEBUG)
|
||||
mark_as_advanced(TBB_LIBRARY_DEBUG TBB_MALLOC_LIBRARY_DEBUG)
|
||||
if(TBB_${_comp}_LIBRARY_DEBUG)
|
||||
list(APPEND TBB_LIBRARIES_DEBUG "${TBB_${_comp}_LIBRARY_DEBUG}")
|
||||
endif()
|
||||
if(TBB_${_comp}_LIBRARY_RELEASE)
|
||||
list(APPEND TBB_LIBRARIES_RELEASE "${TBB_${_comp}_LIBRARY_RELEASE}")
|
||||
endif()
|
||||
if(TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE} AND NOT TBB_${_comp}_LIBRARY)
|
||||
set(TBB_${_comp}_LIBRARY "${TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE}}")
|
||||
endif()
|
||||
|
||||
if(TBB_${_comp}_LIBRARY AND EXISTS "${TBB_${_comp}_LIBRARY}")
|
||||
set(TBB_${_comp}_FOUND TRUE)
|
||||
else()
|
||||
set(TBB_${_comp}_FOUND FALSE)
|
||||
endif()
|
||||
|
||||
if (TBB_INCLUDE_DIR)
|
||||
if (TBB_LIBRARY)
|
||||
set (TBB_FOUND "YES")
|
||||
set (TBB_LIBRARIES ${TBB_LIBRARY} ${TBB_MALLOC_LIBRARY} ${TBB_LIBRARIES})
|
||||
set (TBB_DEBUG_LIBRARIES ${TBB_LIBRARY_DEBUG} ${TBB_MALLOC_LIBRARY_DEBUG} ${TBB_DEBUG_LIBRARIES})
|
||||
set (TBB_INCLUDE_DIRS ${TBB_INCLUDE_DIR} CACHE PATH "TBB include directory" FORCE)
|
||||
set (TBB_LIBRARY_DIRS ${TBB_LIBRARY_DIR} CACHE PATH "TBB library directory" FORCE)
|
||||
# Jiri: Self-built TBB stores the debug libraries in a separate directory.
|
||||
set (TBB_DEBUG_LIBRARY_DIRS ${TBB_LIBRARY_DEBUG_DIR} CACHE PATH "TBB debug library directory" FORCE)
|
||||
mark_as_advanced(TBB_INCLUDE_DIRS TBB_LIBRARY_DIRS TBB_DEBUG_LIBRARY_DIRS TBB_LIBRARIES TBB_DEBUG_LIBRARIES)
|
||||
message(STATUS "Found Intel TBB")
|
||||
endif (TBB_LIBRARY)
|
||||
endif (TBB_INCLUDE_DIR)
|
||||
# Mark internal variables as advanced
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY_RELEASE)
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY_DEBUG)
|
||||
mark_as_advanced(TBB_${_comp}_LIBRARY)
|
||||
|
||||
if (NOT TBB_FOUND)
|
||||
message(STATUS "TBB: Intel TBB NOT found!")
|
||||
message(STATUS "TBB: Looked for Threading Building Blocks in ${_TBB_INSTALL_DIR}")
|
||||
# do only throw fatal, if this pkg is REQUIRED
|
||||
if (TBB_FIND_REQUIRED)
|
||||
message(FATAL_ERROR "Could NOT find TBB library.")
|
||||
endif (TBB_FIND_REQUIRED)
|
||||
endif (NOT TBB_FOUND)
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
endif (NOT _TBB_INSTALL_DIR)
|
||||
##################################
|
||||
# Set compile flags and libraries
|
||||
##################################
|
||||
|
||||
if (TBB_FOUND)
|
||||
set(TBB_INTERFACE_VERSION 0)
|
||||
FILE(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _TBB_VERSION_CONTENTS)
|
||||
STRING(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" TBB_INTERFACE_VERSION "${_TBB_VERSION_CONTENTS}")
|
||||
set(TBB_INTERFACE_VERSION "${TBB_INTERFACE_VERSION}")
|
||||
endif (TBB_FOUND)
|
||||
set(TBB_DEFINITIONS_RELEASE "")
|
||||
set(TBB_DEFINITIONS_DEBUG "-DTBB_USE_DEBUG=1")
|
||||
|
||||
if(TBB_LIBRARIES_${TBB_BUILD_TYPE})
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_${TBB_BUILD_TYPE}}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_${TBB_BUILD_TYPE}}")
|
||||
elseif(TBB_LIBRARIES_RELEASE)
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_RELEASE}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_RELEASE}")
|
||||
elseif(TBB_LIBRARIES_DEBUG)
|
||||
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}")
|
||||
set(TBB_LIBRARIES "${TBB_LIBRARIES_DEBUG}")
|
||||
endif()
|
||||
|
||||
find_package_handle_standard_args(TBB
|
||||
REQUIRED_VARS TBB_INCLUDE_DIRS TBB_LIBRARIES
|
||||
HANDLE_COMPONENTS
|
||||
VERSION_VAR TBB_VERSION)
|
||||
|
||||
##################################
|
||||
# Create targets
|
||||
##################################
|
||||
|
||||
if(NOT CMAKE_VERSION VERSION_LESS 3.0 AND TBB_FOUND)
|
||||
# Start fix to support different targets for tbb, tbbmalloc, etc.
|
||||
# (Jose Luis Blanco, Jan 2019)
|
||||
# Iterate over tbb, tbbmalloc, etc.
|
||||
foreach(libname ${TBB_SEARCH_COMPOMPONENTS})
|
||||
if ((NOT TBB_${libname}_LIBRARY_RELEASE) AND (NOT TBB_${libname}_LIBRARY_DEBUG))
|
||||
continue()
|
||||
endif()
|
||||
|
||||
add_library(${libname} SHARED IMPORTED)
|
||||
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS}
|
||||
IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_RELEASE})
|
||||
if(TBB_${libname}_LIBRARY_RELEASE AND TBB_${libname}_LIBRARY_DEBUG)
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS "$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:TBB_USE_DEBUG=1>"
|
||||
IMPORTED_LOCATION_DEBUG ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
IMPORTED_LOCATION_RELWITHDEBINFO ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
IMPORTED_LOCATION_RELEASE ${TBB_${libname}_LIBRARY_RELEASE}
|
||||
IMPORTED_LOCATION_MINSIZEREL ${TBB_${libname}_LIBRARY_RELEASE}
|
||||
)
|
||||
elseif(TBB_${libname}_LIBRARY_RELEASE)
|
||||
set_target_properties(${libname} PROPERTIES IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_RELEASE})
|
||||
else()
|
||||
set_target_properties(${libname} PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}"
|
||||
IMPORTED_LOCATION ${TBB_${libname}_LIBRARY_DEBUG}
|
||||
)
|
||||
endif()
|
||||
endforeach()
|
||||
# End of fix to support different targets
|
||||
endif()
|
||||
|
||||
mark_as_advanced(TBB_INCLUDE_DIRS TBB_LIBRARIES)
|
||||
|
||||
unset(TBB_ARCHITECTURE)
|
||||
unset(TBB_BUILD_TYPE)
|
||||
unset(TBB_LIB_PATH_SUFFIX)
|
||||
unset(TBB_DEFAULT_SEARCH_DIR)
|
||||
|
||||
endif()
|
||||
|
|
|
|||
|
|
@ -0,0 +1,27 @@
|
|||
###############################################################################
|
||||
# Macro:
|
||||
#
|
||||
# gtsamAddPch(precompiledHeader precompiledSource sources)
|
||||
#
|
||||
# Adds a precompiled header to compile all sources with. Currently only on MSVC.
|
||||
# Inspired by https://stackoverflow.com/questions/148570/
|
||||
#
|
||||
# Arguments:
|
||||
# precompiledHeader: the header file that includes headers to be precompiled.
|
||||
# precompiledSource: the source file that simply includes that header above.
|
||||
# sources: the list of source files to apply this to.
|
||||
#
|
||||
macro(gtsamAddPch precompiledHeader precompiledSource sources)
|
||||
get_filename_component(pchBasename ${precompiledHeader} NAME_WE)
|
||||
SET(precompiledBinary "${CMAKE_CURRENT_BINARY_DIR}/${pchBasename}.pch")
|
||||
IF(MSVC)
|
||||
message(STATUS "Adding precompiled header for MSVC")
|
||||
set_source_files_properties(${precompiledSource}
|
||||
PROPERTIES COMPILE_FLAGS "/Yc\"${precompiledHeader}\" /Fp\"${precompiledBinary}\""
|
||||
OBJECT_OUTPUTS "${precompiledBinary}")
|
||||
set_source_files_properties(${sources}
|
||||
PROPERTIES COMPILE_FLAGS "/Yu\"${precompiledHeader}\" /FI\"${precompiledHeader}\" /Fp\"${precompiledBinary}\""
|
||||
OBJECT_DEPENDS "${precompiledBinary}")
|
||||
ENDIF(MSVC)
|
||||
endmacro(gtsamAddPch)
|
||||
|
||||
|
|
@ -90,6 +90,12 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
|
|||
endif()
|
||||
endif()
|
||||
|
||||
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
|
||||
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
|
||||
endif()
|
||||
|
||||
# Set up build type library postfixes
|
||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
||||
foreach(build_type Debug Timing Profiling RelWithDebInfo MinSizeRel)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,261 @@
|
|||
# Check Cython version, need to be >=0.25.2
|
||||
# Unset these cached variables to avoid surprises when the python/cython
|
||||
# in the current environment are different from the cached!
|
||||
unset(PYTHON_EXECUTABLE CACHE)
|
||||
unset(CYTHON_EXECUTABLE CACHE)
|
||||
unset(PYTHON_INCLUDE_DIR CACHE)
|
||||
unset(PYTHON_MAJOR_VERSION CACHE)
|
||||
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
find_package(PythonInterp REQUIRED)
|
||||
find_package(PythonLibs REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
|
||||
endif()
|
||||
find_package(Cython 0.25.2 REQUIRED)
|
||||
|
||||
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"from __future__ import print_function;import sys;print(sys.version[0], end='')"
|
||||
OUTPUT_VARIABLE PYTHON_MAJOR_VERSION
|
||||
)
|
||||
|
||||
# User-friendly Cython wrapping and installing function.
|
||||
# Builds a Cython module from the provided interface_header.
|
||||
# For example, for the interface header gtsam.h,
|
||||
# this will build the wrap module 'gtsam'.
|
||||
#
|
||||
# Arguments:
|
||||
#
|
||||
# interface_header: The relative path to the wrapper interface definition file.
|
||||
# extra_imports: extra header to import in the Cython pxd file.
|
||||
# For example, to use Cython gtsam.pxd in your own module,
|
||||
# use "from gtsam cimport *"
|
||||
# install_path: destination to install the library
|
||||
# libs: libraries to link with
|
||||
# dependencies: Dependencies which need to be built before the wrapper
|
||||
function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
|
||||
# Paths for generated files
|
||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
||||
set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
|
||||
wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
|
||||
install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
|
||||
endfunction()
|
||||
|
||||
function(set_up_required_cython_packages)
|
||||
# Set up building of cython module
|
||||
include_directories(${PYTHON_INCLUDE_DIRS})
|
||||
find_package(NumPy REQUIRED)
|
||||
include_directories(${NUMPY_INCLUDE_DIRS})
|
||||
endfunction()
|
||||
|
||||
|
||||
# Convert pyx to cpp by executing cython
|
||||
# This is the first step to compile cython from the command line
|
||||
# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
|
||||
#
|
||||
# Arguments:
|
||||
# - target: The specified target for this step
|
||||
# - pyx_file: The input pyx_file in full *absolute* path
|
||||
# - generated_cpp: The output cpp file in full absolute path
|
||||
# - include_dirs: Directories to include when executing cython
|
||||
function(pyx_to_cpp target pyx_file generated_cpp include_dirs)
|
||||
foreach(dir ${include_dirs})
|
||||
set(includes_for_cython ${includes_for_cython} -I ${dir})
|
||||
endforeach()
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT ${generated_cpp}
|
||||
COMMAND
|
||||
${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp}
|
||||
VERBATIM)
|
||||
add_custom_target(${target} ALL DEPENDS ${generated_cpp})
|
||||
endfunction()
|
||||
|
||||
# Build the cpp file generated by converting pyx using cython
|
||||
# This is the second step to compile cython from the command line
|
||||
# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
|
||||
#
|
||||
# Arguments:
|
||||
# - target: The specified target for this step
|
||||
# - cpp_file: The input cpp_file in full *absolute* path
|
||||
# - output_lib_we: The output lib filename only (without extension)
|
||||
# - output_dir: The output directory
|
||||
function(build_cythonized_cpp target cpp_file output_lib_we output_dir)
|
||||
add_library(${target} MODULE ${cpp_file})
|
||||
if(APPLE)
|
||||
set(link_flags "-undefined dynamic_lookup")
|
||||
endif()
|
||||
set_target_properties(${target} PROPERTIES COMPILE_FLAGS "-w" LINK_FLAGS "${link_flags}"
|
||||
OUTPUT_NAME ${output_lib_we} PREFIX "" LIBRARY_OUTPUT_DIRECTORY ${output_dir})
|
||||
endfunction()
|
||||
|
||||
# Cythonize a pyx from the command line as described at
|
||||
# http://cython.readthedocs.io/en/latest/src/reference/compilation.html
|
||||
# Arguments:
|
||||
# - target: The specified target
|
||||
# - pyx_file: The input pyx_file in full *absolute* path
|
||||
# - output_lib_we: The output lib filename only (without extension)
|
||||
# - output_dir: The output directory
|
||||
# - include_dirs: Directories to include when executing cython
|
||||
# - libs: Libraries to link with
|
||||
# - interface_header: For dependency. Any update in interface header will re-trigger cythonize
|
||||
function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies)
|
||||
get_filename_component(pyx_path "${pyx_file}" DIRECTORY)
|
||||
get_filename_component(pyx_name "${pyx_file}" NAME_WE)
|
||||
set(generated_cpp "${output_dir}/${pyx_name}.cpp")
|
||||
|
||||
set_up_required_cython_packages()
|
||||
pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}")
|
||||
|
||||
# Late dependency injection, to make sure this gets called whenever the interface header is updated
|
||||
# See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes
|
||||
add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND)
|
||||
if (NOT "${dependencies}" STREQUAL "")
|
||||
add_dependencies(${target}_pyx2cpp "${dependencies}")
|
||||
endif()
|
||||
|
||||
build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir})
|
||||
if (NOT "${libs}" STREQUAL "")
|
||||
target_link_libraries(${target} "${libs}")
|
||||
endif()
|
||||
add_dependencies(${target} ${target}_pyx2cpp)
|
||||
endfunction()
|
||||
|
||||
# Internal function that wraps a library and compiles the wrapper
|
||||
function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies)
|
||||
# Wrap codegen interface
|
||||
# Extract module path and name from interface header file name
|
||||
# wrap requires interfacePath to be *absolute*
|
||||
get_filename_component(interface_header "${interface_header}" ABSOLUTE)
|
||||
get_filename_component(module_path "${interface_header}" PATH)
|
||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
||||
|
||||
# Wrap module to Cython pyx
|
||||
message(STATUS "Cython wrapper generating ${module_name}.pyx")
|
||||
set(generated_pyx "${generated_files_path}/${module_name}.pyx")
|
||||
file(MAKE_DIRECTORY "${generated_files_path}")
|
||||
add_custom_command(
|
||||
OUTPUT ${generated_pyx}
|
||||
DEPENDS ${interface_header} wrap
|
||||
COMMAND
|
||||
wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}"
|
||||
VERBATIM
|
||||
WORKING_DIRECTORY ${generated_files_path}/../)
|
||||
add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx})
|
||||
if(NOT "${dependencies}" STREQUAL "")
|
||||
add_dependencies(cython_wrap_${module_name}_pyx ${dependencies})
|
||||
endif()
|
||||
|
||||
message(STATUS "Cythonize and build ${module_name}.pyx")
|
||||
get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES)
|
||||
cythonize(cythonize_${module_name} ${generated_pyx} ${module_name}
|
||||
${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx)
|
||||
|
||||
# distclean
|
||||
add_custom_target(wrap_${module_name}_cython_distclean
|
||||
COMMAND cmake -E remove_directory ${generated_files_path})
|
||||
endfunction()
|
||||
|
||||
# Internal function that installs a wrap toolbox
|
||||
function(install_cython_wrapped_library interface_header generated_files_path install_path)
|
||||
get_filename_component(module_name "${interface_header}" NAME_WE)
|
||||
|
||||
# NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
|
||||
# here prevents creating the top-level module name directory in the destination.
|
||||
message(STATUS "Installing Cython Toolbox to ${install_path}") #${GTSAM_CYTHON_INSTALL_PATH}")
|
||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
||||
string(TOUPPER "${build_type}" build_type_upper)
|
||||
if(${build_type_upper} STREQUAL "RELEASE")
|
||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
||||
else()
|
||||
set(build_type_tag "${build_type}")
|
||||
endif()
|
||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
||||
get_filename_component(location "${install_path}" PATH)
|
||||
get_filename_component(name "${install_path}" NAME)
|
||||
install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}/${name}${build_type_tag}"
|
||||
CONFIGURATIONS "${build_type}"
|
||||
PATTERN "build" EXCLUDE
|
||||
PATTERN "CMakeFiles" EXCLUDE
|
||||
PATTERN "Makefile" EXCLUDE
|
||||
PATTERN "*.cmake" EXCLUDE
|
||||
PATTERN "*.cpp" EXCLUDE
|
||||
PATTERN "*.py" EXCLUDE)
|
||||
endforeach()
|
||||
else()
|
||||
install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
|
||||
PATTERN "build" EXCLUDE
|
||||
PATTERN "CMakeFiles" EXCLUDE
|
||||
PATTERN "Makefile" EXCLUDE
|
||||
PATTERN "*.cmake" EXCLUDE
|
||||
PATTERN "*.cpp" EXCLUDE
|
||||
PATTERN "*.py" EXCLUDE)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
# Helper function to install Cython scripts and handle multiple build types where the scripts
|
||||
# should be installed to all build type toolboxes
|
||||
#
|
||||
# Arguments:
|
||||
# source_directory: The source directory to be installed. "The last component of each directory
|
||||
# name is appended to the destination directory but a trailing slash may be
|
||||
# used to avoid this because it leaves the last component empty."
|
||||
# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories)
|
||||
# dest_directory: The destination directory to install to.
|
||||
# patterns: list of file patterns to install
|
||||
function(install_cython_scripts source_directory dest_directory patterns)
|
||||
set(patterns_args "")
|
||||
set(exclude_patterns "")
|
||||
|
||||
foreach(pattern ${patterns})
|
||||
list(APPEND patterns_args PATTERN "${pattern}")
|
||||
endforeach()
|
||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
||||
string(TOUPPER "${build_type}" build_type_upper)
|
||||
if(${build_type_upper} STREQUAL "RELEASE")
|
||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
||||
else()
|
||||
set(build_type_tag "${build_type}")
|
||||
endif()
|
||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
||||
get_filename_component(location "${dest_directory}" PATH)
|
||||
get_filename_component(name "${dest_directory}" NAME)
|
||||
install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
|
||||
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
||||
endforeach()
|
||||
else()
|
||||
install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
|
||||
endif()
|
||||
|
||||
endfunction()
|
||||
|
||||
# Helper function to install specific files and handle multiple build types where the scripts
|
||||
# should be installed to all build type toolboxes
|
||||
#
|
||||
# Arguments:
|
||||
# source_files: The source files to be installed.
|
||||
# dest_directory: The destination directory to install to.
|
||||
function(install_cython_files source_files dest_directory)
|
||||
|
||||
if(GTSAM_BUILD_TYPE_POSTFIXES)
|
||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
||||
string(TOUPPER "${build_type}" build_type_upper)
|
||||
if(${build_type_upper} STREQUAL "RELEASE")
|
||||
set(build_type_tag "") # Don't create release mode tag on installed directory
|
||||
else()
|
||||
set(build_type_tag "${build_type}")
|
||||
endif()
|
||||
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
|
||||
get_filename_component(location "${dest_directory}" PATH)
|
||||
get_filename_component(name "${dest_directory}" NAME)
|
||||
install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
|
||||
endforeach()
|
||||
else()
|
||||
install(FILES "${source_files}" DESTINATION "${dest_directory}")
|
||||
endif()
|
||||
|
||||
endfunction()
|
||||
|
||||
|
|
@ -128,8 +128,7 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
|
|||
## This needs to be fixed!!
|
||||
if(UNIX AND NOT APPLE)
|
||||
list(APPEND automaticDependencies ${Boost_SERIALIZATION_LIBRARY_RELEASE} ${Boost_FILESYSTEM_LIBRARY_RELEASE}
|
||||
${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE}
|
||||
${Boost_REGEX_LIBRARY_RELEASE})
|
||||
${Boost_SYSTEM_LIBRARY_RELEASE} ${Boost_THREAD_LIBRARY_RELEASE} ${Boost_DATE_TIME_LIBRARY_RELEASE})
|
||||
if(Boost_TIMER_LIBRARY_RELEASE AND NOT GTSAM_DISABLE_NEW_TIMERS) # Only present in Boost >= 1.48.0
|
||||
list(APPEND automaticDependencies ${Boost_TIMER_LIBRARY_RELEASE} ${Boost_CHRONO_LIBRARY_RELEASE})
|
||||
if(GTSAM_MEX_BUILD_STATIC_MODULE)
|
||||
|
|
@ -209,7 +208,7 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
|
|||
OUTPUT ${generated_cpp_file}
|
||||
DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
|
||||
COMMAND
|
||||
wrap
|
||||
wrap --matlab
|
||||
${modulePath}
|
||||
${moduleName}
|
||||
${generated_files_path}
|
||||
|
|
@ -220,9 +219,9 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
|
|||
# Set up building of mex module
|
||||
string(REPLACE ";" " " extraMexFlagsSpaced "${extraMexFlags}")
|
||||
string(REPLACE ";" " " mexFlagsSpaced "${GTSAM_BUILD_MEX_BINARY_FLAGS}")
|
||||
add_library(${moduleName}_wrapper MODULE ${generated_cpp_file} ${interfaceHeader} ${otherSourcesAndObjects})
|
||||
target_link_libraries(${moduleName}_wrapper ${correctedOtherLibraries})
|
||||
set_target_properties(${moduleName}_wrapper PROPERTIES
|
||||
add_library(${moduleName}_matlab_wrapper MODULE ${generated_cpp_file} ${interfaceHeader} ${otherSourcesAndObjects})
|
||||
target_link_libraries(${moduleName}_matlab_wrapper ${correctedOtherLibraries})
|
||||
set_target_properties(${moduleName}_matlab_wrapper PROPERTIES
|
||||
OUTPUT_NAME "${moduleName}_wrapper"
|
||||
PREFIX ""
|
||||
SUFFIX ".${mexModuleExt}"
|
||||
|
|
@ -230,12 +229,12 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
|
|||
ARCHIVE_OUTPUT_DIRECTORY "${compiled_mex_modules_root}"
|
||||
RUNTIME_OUTPUT_DIRECTORY "${compiled_mex_modules_root}"
|
||||
CLEAN_DIRECT_OUTPUT 1)
|
||||
set_property(TARGET ${moduleName}_wrapper APPEND_STRING PROPERTY COMPILE_FLAGS " ${extraMexFlagsSpaced} ${mexFlagsSpaced} \"-I${MATLAB_ROOT}/extern/include\" -DMATLAB_MEX_FILE -DMX_COMPAT_32")
|
||||
set_property(TARGET ${moduleName}_wrapper APPEND PROPERTY INCLUDE_DIRECTORIES ${extraIncludeDirs})
|
||||
set_property(TARGET ${moduleName}_matlab_wrapper APPEND_STRING PROPERTY COMPILE_FLAGS " ${extraMexFlagsSpaced} ${mexFlagsSpaced} \"-I${MATLAB_ROOT}/extern/include\" -DMATLAB_MEX_FILE -DMX_COMPAT_32")
|
||||
set_property(TARGET ${moduleName}_matlab_wrapper APPEND PROPERTY INCLUDE_DIRECTORIES ${extraIncludeDirs})
|
||||
# Disable build type postfixes for the mex module - we install in different directories for each build type instead
|
||||
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
|
||||
string(TOUPPER "${build_type}" build_type_upper)
|
||||
set_target_properties(${moduleName}_wrapper PROPERTIES ${build_type_upper}_POSTFIX "")
|
||||
set_target_properties(${moduleName}_matlab_wrapper PROPERTIES ${build_type_upper}_POSTFIX "")
|
||||
endforeach()
|
||||
# Set up platform-specific flags
|
||||
if(MSVC)
|
||||
|
|
@ -244,17 +243,17 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
|
|||
else()
|
||||
set(mxLibPath "${MATLAB_ROOT}/extern/lib/win32/microsoft")
|
||||
endif()
|
||||
target_link_libraries(${moduleName}_wrapper "${mxLibPath}/libmex.lib" "${mxLibPath}/libmx.lib" "${mxLibPath}/libmat.lib")
|
||||
set_target_properties(${moduleName}_wrapper PROPERTIES LINK_FLAGS "/export:mexFunction")
|
||||
target_link_libraries(${moduleName}_matlab_wrapper "${mxLibPath}/libmex.lib" "${mxLibPath}/libmx.lib" "${mxLibPath}/libmat.lib")
|
||||
set_target_properties(${moduleName}_matlab_wrapper PROPERTIES LINK_FLAGS "/export:mexFunction")
|
||||
set_property(SOURCE "${generated_cpp_file}" APPEND PROPERTY COMPILE_FLAGS "/bigobj")
|
||||
elseif(APPLE)
|
||||
set(mxLibPath "${MATLAB_ROOT}/bin/maci64")
|
||||
target_link_libraries(${moduleName}_wrapper "${mxLibPath}/libmex.dylib" "${mxLibPath}/libmx.dylib" "${mxLibPath}/libmat.dylib")
|
||||
target_link_libraries(${moduleName}_matlab_wrapper "${mxLibPath}/libmex.dylib" "${mxLibPath}/libmx.dylib" "${mxLibPath}/libmat.dylib")
|
||||
endif()
|
||||
|
||||
# Hacking around output issue with custom command
|
||||
# Deletes generated build folder
|
||||
add_custom_target(wrap_${moduleName}_distclean
|
||||
add_custom_target(wrap_${moduleName}_matlab_distclean
|
||||
COMMAND cmake -E remove_directory ${generated_files_path}
|
||||
COMMAND cmake -E remove_directory ${compiled_mex_modules_root})
|
||||
endfunction()
|
||||
|
|
@ -279,13 +278,13 @@ function(install_wrapped_library_internal interfaceHeader)
|
|||
get_filename_component(location "${GTSAM_TOOLBOX_INSTALL_PATH}" PATH)
|
||||
get_filename_component(name "${GTSAM_TOOLBOX_INSTALL_PATH}" NAME)
|
||||
install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}" FILES_MATCHING PATTERN "*.m")
|
||||
install(TARGETS ${moduleName}_wrapper
|
||||
install(TARGETS ${moduleName}_matlab_wrapper
|
||||
LIBRARY DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
|
||||
RUNTIME DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
|
||||
endforeach()
|
||||
else()
|
||||
install(DIRECTORY "${generated_files_path}/" DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH} FILES_MATCHING PATTERN "*.m")
|
||||
install(TARGETS ${moduleName}_wrapper
|
||||
install(TARGETS ${moduleName}_matlab_wrapper
|
||||
LIBRARY DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH}
|
||||
RUNTIME DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH})
|
||||
endif()
|
||||
|
|
|
|||
|
|
@ -101,6 +101,9 @@ mark_as_advanced(GTSAM_SINGLE_TEST_EXE)
|
|||
# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
|
||||
if(GTSAM_BUILD_TESTS)
|
||||
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $<CONFIGURATION> --output-on-failure)
|
||||
|
||||
# Add target to build tests without running
|
||||
add_custom_target(all.tests)
|
||||
endif()
|
||||
|
||||
# Add examples target
|
||||
|
|
@ -109,8 +112,6 @@ add_custom_target(examples)
|
|||
# Add timing target
|
||||
add_custom_target(timing)
|
||||
|
||||
# Add target to build tests without running
|
||||
add_custom_target(all.tests)
|
||||
|
||||
# Implementations of this file's macros:
|
||||
|
||||
|
|
|
|||
|
|
@ -26,21 +26,30 @@
|
|||
// class __declspec(dllexport) MyClass { ... };
|
||||
// When included while compiling other code against GTSAM:
|
||||
// class __declspec(dllimport) MyClass { ... };
|
||||
|
||||
#pragma once
|
||||
|
||||
// Whether GTSAM is compiled as static or DLL in windows.
|
||||
// This will be used to decide whether include __declspec(dllimport) or not in headers
|
||||
#cmakedefine BUILD_SHARED_LIBS
|
||||
|
||||
#ifdef _WIN32
|
||||
# ifdef @library_name@_EXPORTS
|
||||
# define @library_name@_EXPORT __declspec(dllexport)
|
||||
# define @library_name@_EXTERN_EXPORT __declspec(dllexport) extern
|
||||
# ifndef BUILD_SHARED_LIBS
|
||||
# define @library_name@_EXPORT
|
||||
# define @library_name@_EXTERN_EXPORT extern
|
||||
# else
|
||||
# ifndef @library_name@_IMPORT_STATIC
|
||||
# ifdef @library_name@_EXPORTS
|
||||
# define @library_name@_EXPORT __declspec(dllexport)
|
||||
# define @library_name@_EXTERN_EXPORT __declspec(dllexport) extern
|
||||
# else
|
||||
# define @library_name@_EXPORT __declspec(dllimport)
|
||||
# define @library_name@_EXTERN_EXPORT __declspec(dllimport)
|
||||
# else /* @library_name@_IMPORT_STATIC */
|
||||
# define @library_name@_EXPORT
|
||||
# define @library_name@_EXTERN_EXPORT extern
|
||||
# endif /* @library_name@_IMPORT_STATIC */
|
||||
# endif /* @library_name@_EXPORTS */
|
||||
#else /* _WIN32 */
|
||||
# endif
|
||||
# endif
|
||||
#else
|
||||
# define @library_name@_EXPORT
|
||||
# define @library_name@_EXTERN_EXPORT extern
|
||||
#endif
|
||||
|
||||
#undef BUILD_SHARED_LIBS
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
###################################################################################
|
||||
# To create your own project, replace "example" with the actual name of your project
|
||||
cmake_minimum_required(VERSION 2.6)
|
||||
cmake_minimum_required(VERSION 3.0)
|
||||
project(example CXX C)
|
||||
|
||||
# Include GTSAM CMake tools
|
||||
|
|
@ -22,14 +22,20 @@ include_directories(BEFORE "${PROJECT_SOURCE_DIR}")
|
|||
###################################################################################
|
||||
# Find GTSAM components
|
||||
find_package(GTSAM REQUIRED) # Uses installed package
|
||||
include_directories(${GTSAM_INCLUDE_DIR})
|
||||
# Note: Since Jan-2019, GTSAMConfig.cmake defines exported CMake targets
|
||||
# that automatically do include the include_directories() without the need
|
||||
# to call include_directories(), just target_link_libraries(NAME gtsam)
|
||||
#include_directories(${GTSAM_INCLUDE_DIR})
|
||||
|
||||
###################################################################################
|
||||
# Build static library from common sources
|
||||
set(CONVENIENCE_LIB_NAME ${PROJECT_NAME})
|
||||
add_library(${CONVENIENCE_LIB_NAME} STATIC example/PrintExamples.h example/PrintExamples.cpp)
|
||||
add_library(${CONVENIENCE_LIB_NAME} SHARED example/PrintExamples.h example/PrintExamples.cpp)
|
||||
target_link_libraries(${CONVENIENCE_LIB_NAME} gtsam)
|
||||
|
||||
# Install library
|
||||
install(TARGETS ${CONVENIENCE_LIB_NAME} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib RUNTIME DESTINATION bin)
|
||||
|
||||
###################################################################################
|
||||
# Build tests (CMake tracks the dependecy to link with GTSAM through our project's static library)
|
||||
gtsamAddTestsGlob("example" "tests/test*.cpp" "" "${CONVENIENCE_LIB_NAME}")
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
|
|||
# MATLAB Wrapper Example Project
|
||||
|
||||
This project serves as a lightweight example for demonstrating how to wrap C++ code in MATLAB using GTSAM.
|
||||
|
||||
## Compiling
|
||||
|
||||
We follow the regular build procedure inside the `example_project` directory:
|
||||
|
||||
```sh
|
||||
mkdir build && cd build
|
||||
cmake ..
|
||||
make -j8
|
||||
sudo make install
|
||||
|
||||
sudo ldconfig # ensures the shared object file generated is correctly loaded
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Now you can open MATLAB and add the `gtsam_toolbox` to the MATLAB path
|
||||
|
||||
```matlab
|
||||
addpath('/usr/local/gtsam_toolbox')
|
||||
```
|
||||
|
||||
At this point you are ready to run the example project. Starting from the `example_project` directory inside MATLAB, simply run code like regular MATLAB, e.g.
|
||||
|
||||
```matlab
|
||||
pe = example.PrintExamples();
|
||||
pe.sayHello();
|
||||
pe.sayGoodbye();
|
||||
```
|
||||
|
|
@ -18,11 +18,14 @@
|
|||
// This is an interface file for automatic MATLAB wrapper generation. See
|
||||
// gtsam.h for full documentation and more examples.
|
||||
|
||||
#include <example/PrintExamples.h>
|
||||
|
||||
namespace example {
|
||||
|
||||
class PrintExamples {
|
||||
void sayHello() const;
|
||||
void sayGoodbye() const;
|
||||
PrintExamples();
|
||||
void sayHello() const;
|
||||
void sayGoodbye() const;
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
namespace example {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,43 @@
|
|||
# Install cython components
|
||||
include(GtsamCythonWrap)
|
||||
|
||||
# Create the cython toolbox for the gtsam library
|
||||
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
|
||||
# build and include the eigency version of eigency
|
||||
add_subdirectory(gtsam_eigency)
|
||||
include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
|
||||
|
||||
# wrap gtsam
|
||||
add_custom_target(gtsam_header DEPENDS "../gtsam.h")
|
||||
wrap_and_install_library_cython("../gtsam.h" # interface_header
|
||||
"" # extra imports
|
||||
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path
|
||||
gtsam # library to link with
|
||||
"wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
|
||||
)
|
||||
|
||||
# wrap gtsam_unstable
|
||||
if(GTSAM_BUILD_UNSTABLE)
|
||||
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
|
||||
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
|
||||
"from gtsam.gtsam cimport *" # extra imports
|
||||
"${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path
|
||||
gtsam_unstable # library to link with
|
||||
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
|
||||
)
|
||||
endif()
|
||||
|
||||
file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
|
||||
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
|
||||
|
||||
# Install the custom-generated __init__.py
|
||||
# This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
|
||||
install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
|
||||
# install scripts and tests
|
||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
|
||||
|
||||
endif ()
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
This is the Cython/Python wrapper around the GTSAM C++ library.
|
||||
|
||||
INSTALL
|
||||
=======
|
||||
- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
|
||||
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
|
||||
- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
|
||||
|
||||
```bash
|
||||
pip install -r <gtsam_folder>/cython/requirements.txt
|
||||
```
|
||||
|
||||
- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
|
||||
named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
|
||||
|
||||
- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
|
||||
The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
|
||||
by default: `<your CMAKE_INSTALL_PREFIX>/cython`
|
||||
|
||||
- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
|
||||
```bash
|
||||
export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
|
||||
```
|
||||
- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
|
||||
- (the same command can be used to install into a virtual environment if it is active)
|
||||
- note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
|
||||
- if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
|
||||
Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
|
||||
|
||||
UNIT TESTS
|
||||
==========
|
||||
The Cython toolbox also has a small set of unit tests located in the
|
||||
test directory. To run them:
|
||||
|
||||
```bash
|
||||
cd <your GTSAM_CYTHON_INSTALL_PATH>
|
||||
python -m unittest discover
|
||||
```
|
||||
|
||||
WRITING YOUR OWN SCRIPTS
|
||||
========================
|
||||
See the tests for examples.
|
||||
|
||||
## Some important notes:
|
||||
|
||||
- Vector/Matrix:
|
||||
+ GTSAM expects double-precision floating point vectors and matrices.
|
||||
Hence, you should pass numpy matrices with dtype=float, or 'float64'.
|
||||
+ Also, GTSAM expects *column-major* matrices, unlike the default storage
|
||||
scheme in numpy. Hence, you should pass column-major matrices to gtsam using
|
||||
the flag order='F'. And you always get column-major matrices back.
|
||||
For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
|
||||
+ Passing row-major matrices of different dtype, e.g. 'int', will also work
|
||||
as the wrapper converts them to column-major and dtype float for you,
|
||||
using numpy.array.astype(float, order='F', copy=False).
|
||||
However, this will result a copy if your matrix is not in the expected type
|
||||
and storage order.
|
||||
|
||||
- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>_ in Python.
|
||||
Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
|
||||
|
||||
- Casting from a base class to a derive class must be done explicitly.
|
||||
Examples:
|
||||
```Python
|
||||
noiseBase = factor.noiseModel()
|
||||
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
|
||||
```
|
||||
|
||||
WRAPPING YOUR OWN PROJECT THAT USES GTSAM
|
||||
=========================================
|
||||
- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
|
||||
+ so that it can find gtsam Cython header: gtsam/gtsam.pxd
|
||||
|
||||
- In your CMakeList.txt
|
||||
```cmake
|
||||
find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
|
||||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
|
||||
|
||||
# Wrap
|
||||
include(GtsamCythonWrap)
|
||||
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
|
||||
wrap_and_install_library_cython("your_project_interface.h"
|
||||
"from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
|
||||
"your_install_path"
|
||||
"libraries_to_link_with_the_cython_module"
|
||||
"dependencies_which_need_to_be_built_before_the_wrapper"
|
||||
)
|
||||
#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
|
||||
```
|
||||
|
||||
KNOWN ISSUES
|
||||
============
|
||||
- Doesn't work with python3 installed from homebrew
|
||||
- size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
|
||||
- Guess: 64 vs 32b? disutils Compiler flags?
|
||||
- Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be casted to FastVector<Key>
|
||||
- Upgrading to 0.25 solves the problem
|
||||
- Need default constructor and default copy constructor for almost every classes... :(
|
||||
- support these constructors by default and declare "delete" for special classes?
|
||||
|
||||
|
||||
TODO
|
||||
=====
|
||||
☐ allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython.
|
||||
☐ a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
|
||||
☐ inner namespaces ==> inner packages?
|
||||
☐ Wrap fixed-size Matrices/Vectors?
|
||||
|
||||
|
||||
Completed/Cancelled:
|
||||
=====
|
||||
✔ Fix Python tests: don't use " import <package> * ": Bad style!!! @done (18-03-17 19:50)
|
||||
✔ Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
|
||||
✔ Wrap unstable @done (18-03-17 15:30)
|
||||
✔ Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
|
||||
✔ 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
|
||||
✔ 06-03-17: manage to remove the requirements for default and copy constructors
|
||||
✘ 25-11-16:
|
||||
Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet.
|
||||
Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
|
||||
✘ Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however.
|
||||
✔ Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
|
||||
✔ Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
|
||||
✔ CMake install script @done (25-11-16 02:30)
|
||||
✘ [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy
|
||||
✘ forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work?
|
||||
✔ wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00)
|
||||
✔ Global functions @done (22-11-16 21:00)
|
||||
✔ [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00)
|
||||
✔ Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00)
|
||||
✔ Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00)
|
||||
✔ Casting from parent and grandparents @done (16-11-16 17:00)
|
||||
✔ Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
|
||||
✔ Support "print obj" @done (16-11-16 17:00)
|
||||
✔ methods for FastVector: at, [], ... @done (16-11-16 17:00)
|
||||
✔ Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
|
||||
✔ KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
|
||||
✔ [Nice to have] parse typedef @done (16-09-13 17:19)
|
||||
✔ ctypedef at correct places @done (16-09-12 18:34)
|
||||
✔ expand template variable type in constructor/static methods? @done (16-09-12 18:34)
|
||||
✔ NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20)
|
||||
✔ Value: no default constructor @done (16-09-13 17:20)
|
||||
✔ ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25)
|
||||
✔ Delete duplicate methods in derived class @done (16-09-12 13:38)
|
||||
✔ Fix return properly @done (16-09-11 17:14)
|
||||
✔ handle pair @done (16-09-11 17:14)
|
||||
✔ Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59)
|
||||
✔ Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59)
|
||||
✔ Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while arugment is Map[Vector] @done (16-09-11 08:22)
|
||||
✔ Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05)
|
||||
✘ Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20)
|
||||
✘ Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28)
|
||||
|
||||
- inference before symbolic/linear
|
||||
- what's the purpose of "virtual" ??
|
||||
|
||||
Installation:
|
||||
☐ Prerequisite:
|
||||
- Users create venv and pip install requirements before compiling
|
||||
- Wrap cython script in gtsam/cython folder
|
||||
☐ Install built module into venv?
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
from .gtsam import *
|
||||
|
||||
try:
|
||||
import gtsam_unstable
|
||||
|
||||
|
||||
def _deprecated_wrapper(item, name):
|
||||
def wrapper(*args, **kwargs):
|
||||
from warnings import warn
|
||||
message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) +
|
||||
'Please import it from gtsam_unstable.')
|
||||
warn(message)
|
||||
return item(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
for name in dir(gtsam_unstable):
|
||||
if not name.startswith('__'):
|
||||
item = getattr(gtsam_unstable, name)
|
||||
if callable(item):
|
||||
item = _deprecated_wrapper(item, name)
|
||||
globals()[name] = item
|
||||
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
|
@ -0,0 +1,118 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Example comparing DoglegOptimizer with Levenberg-Marquardt.
|
||||
Author: Frank Dellaert
|
||||
"""
|
||||
# pylint: disable=no-member, invalid-name
|
||||
|
||||
import math
|
||||
import argparse
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
|
||||
def run(args):
|
||||
"""Test Dogleg vs LM, inspired by issue #452."""
|
||||
|
||||
# print parameters
|
||||
print("num samples = {}, deltaInitial = {}".format(
|
||||
args.num_samples, args.delta))
|
||||
|
||||
# Ground truth solution
|
||||
T11 = gtsam.Pose2(0, 0, 0)
|
||||
T12 = gtsam.Pose2(1, 0, 0)
|
||||
T21 = gtsam.Pose2(0, 1, 0)
|
||||
T22 = gtsam.Pose2(1, 1, 0)
|
||||
|
||||
# Factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Priors
|
||||
prior = gtsam.noiseModel_Isotropic.Sigma(3, 1)
|
||||
graph.add(gtsam.PriorFactorPose2(11, T11, prior))
|
||||
graph.add(gtsam.PriorFactorPose2(21, T21, prior))
|
||||
|
||||
# Odometry
|
||||
model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3]))
|
||||
graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model))
|
||||
graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model))
|
||||
|
||||
# Range
|
||||
model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01)
|
||||
graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho))
|
||||
|
||||
params = gtsam.DoglegParams()
|
||||
params.setDeltaInitial(args.delta) # default is 10
|
||||
|
||||
# Add progressively more noise to ground truth
|
||||
sigmas = [0.01, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
|
||||
n = len(sigmas)
|
||||
p_dl, s_dl, p_lm, s_lm = [0]*n, [0]*n, [0]*n, [0]*n
|
||||
for i, sigma in enumerate(sigmas):
|
||||
dl_fails, lm_fails = 0, 0
|
||||
# Attempt num_samples optimizations for both DL and LM
|
||||
for _attempt in range(args.num_samples):
|
||||
initial = gtsam.Values()
|
||||
initial.insert(11, T11.retract(np.random.normal(0, sigma, 3)))
|
||||
initial.insert(12, T12.retract(np.random.normal(0, sigma, 3)))
|
||||
initial.insert(21, T21.retract(np.random.normal(0, sigma, 3)))
|
||||
initial.insert(22, T22.retract(np.random.normal(0, sigma, 3)))
|
||||
|
||||
# Run dogleg optimizer
|
||||
dl = gtsam.DoglegOptimizer(graph, initial, params)
|
||||
result = dl.optimize()
|
||||
dl_fails += graph.error(result) > 1e-9
|
||||
|
||||
# Run
|
||||
lm = gtsam.LevenbergMarquardtOptimizer(graph, initial)
|
||||
result = lm.optimize()
|
||||
lm_fails += graph.error(result) > 1e-9
|
||||
|
||||
# Calculate Bayes estimate of success probability
|
||||
# using a beta prior of alpha=0.5, beta=0.5
|
||||
alpha, beta = 0.5, 0.5
|
||||
v = args.num_samples+alpha+beta
|
||||
p_dl[i] = (args.num_samples-dl_fails+alpha)/v
|
||||
p_lm[i] = (args.num_samples-lm_fails+alpha)/v
|
||||
|
||||
def stddev(p):
|
||||
"""Calculate standard deviation."""
|
||||
return math.sqrt(p*(1-p)/(1+v))
|
||||
|
||||
s_dl[i] = stddev(p_dl[i])
|
||||
s_lm[i] = stddev(p_lm[i])
|
||||
|
||||
fmt = "sigma= {}:\tDL success {:.2f}% +/- {:.2f}%, LM success {:.2f}% +/- {:.2f}%"
|
||||
print(fmt.format(sigma,
|
||||
100*p_dl[i], 100*s_dl[i],
|
||||
100*p_lm[i], 100*s_lm[i]))
|
||||
|
||||
if args.plot:
|
||||
fig, ax = plt.subplots()
|
||||
dl_plot = plt.errorbar(sigmas, p_dl, yerr=s_dl, label="Dogleg")
|
||||
lm_plot = plt.errorbar(sigmas, p_lm, yerr=s_lm, label="LM")
|
||||
plt.title("Dogleg emprical success vs. LM")
|
||||
plt.legend(handles=[dl_plot, lm_plot])
|
||||
ax.set_xlim(0, sigmas[-1]+1)
|
||||
ax.set_ylim(0, 1)
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Compare Dogleg and LM success rates")
|
||||
parser.add_argument("-n", "--num_samples", type=int, default=1000,
|
||||
help="Number of samples for each sigma")
|
||||
parser.add_argument("-d", "--delta", type=float, default=10.0,
|
||||
help="Initial delta for dogleg")
|
||||
parser.add_argument("-p", "--plot", action="store_true",
|
||||
help="Flag to plot results")
|
||||
args = parser.parse_args()
|
||||
run(args)
|
||||
|
|
@ -1,58 +1,75 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
A script validating the ImuFactor inference.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam_utils import plotPose3
|
||||
from PreintegrationExample import PreintegrationExample, POSES_FIG
|
||||
from gtsam.utils.plot import plot_pose3
|
||||
from PreintegrationExample import POSES_FIG, PreintegrationExample
|
||||
|
||||
BIAS_KEY = int(gtsam.symbol(ord('b'), 0))
|
||||
|
||||
|
||||
def X(key):
|
||||
"""Create symbol for pose key."""
|
||||
return gtsam.symbol(ord('x'), key)
|
||||
|
||||
|
||||
def V(key):
|
||||
"""Create symbol for velocity key."""
|
||||
return gtsam.symbol(ord('v'), key)
|
||||
|
||||
# shorthand symbols:
|
||||
BIAS_KEY = int(gtsam.Symbol('b', 0))
|
||||
V = lambda j: int(gtsam.Symbol('v', j))
|
||||
X = lambda i: int(gtsam.Symbol('x', i))
|
||||
|
||||
np.set_printoptions(precision=3, suppress=True)
|
||||
|
||||
|
||||
class ImuFactorExample(PreintegrationExample):
|
||||
|
||||
def __init__(self):
|
||||
self.velocity = np.array([2, 0, 0])
|
||||
self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
|
||||
self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
|
||||
self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
|
||||
self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
|
||||
|
||||
# Choose one of these twists to change scenario:
|
||||
# Choose one of these twists to change scenario:
|
||||
zero_twist = (np.zeros(3), np.zeros(3))
|
||||
forward_twist = (np.zeros(3), self.velocity)
|
||||
loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity)
|
||||
sick_twist = (np.array([math.radians(30), -math.radians(30), 0]), self.velocity)
|
||||
sick_twist = (
|
||||
np.array([math.radians(30), -math.radians(30), 0]), self.velocity)
|
||||
|
||||
accBias = np.array([-0.3, 0.1, 0.2])
|
||||
gyroBias = np.array([0.1, 0.3, -0.1])
|
||||
bias = gtsam.ConstantBias(accBias, gyroBias)
|
||||
bias = gtsam.imuBias_ConstantBias(accBias, gyroBias)
|
||||
|
||||
dt = 1e-2
|
||||
super(ImuFactorExample, self).__init__(sick_twist, bias, dt)
|
||||
|
||||
def addPrior(self, i, graph):
|
||||
state = self.scenario.navState(i)
|
||||
graph.push_back(gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
|
||||
graph.push_back(gtsam.PriorFactorVector3(V(i), state.velocity(), self.velNoise))
|
||||
|
||||
graph.push_back(gtsam.PriorFactorPose3(
|
||||
X(i), state.pose(), self.priorNoise))
|
||||
graph.push_back(gtsam.PriorFactorVector(
|
||||
V(i), state.velocity(), self.velNoise))
|
||||
|
||||
def run(self):
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# initialize data structure for pre-integrated IMU measurements
|
||||
# initialize data structure for pre-integrated IMU measurements
|
||||
pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)
|
||||
|
||||
H9 = gtsam.OptionalJacobian9();
|
||||
|
||||
|
||||
T = 12
|
||||
num_poses = T + 1 # assumes 1 factor per second
|
||||
initial = gtsam.Values()
|
||||
|
|
@ -61,32 +78,32 @@ class ImuFactorExample(PreintegrationExample):
|
|||
state_i = self.scenario.navState(float(i))
|
||||
initial.insert(X(i), state_i.pose())
|
||||
initial.insert(V(i), state_i.velocity())
|
||||
|
||||
|
||||
# simulate the loop
|
||||
i = 0 # state index
|
||||
i = 0 # state index
|
||||
actual_state_i = self.scenario.navState(0)
|
||||
for k, t in enumerate(np.arange(0, T, self.dt)):
|
||||
# get measurements and add them to PIM
|
||||
measuredOmega = self.runner.measuredAngularVelocity(t)
|
||||
measuredAcc = self.runner.measuredSpecificForce(t)
|
||||
pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
|
||||
|
||||
|
||||
# Plot IMU many times
|
||||
if k % 10 == 0:
|
||||
self.plotImu(t, measuredOmega, measuredAcc)
|
||||
|
||||
|
||||
# Plot every second
|
||||
if k % int(1 / self.dt) == 0:
|
||||
self.plotGroundTruthPose(t)
|
||||
|
||||
|
||||
# create IMU factor every second
|
||||
if (k + 1) % int(1 / self.dt) == 0:
|
||||
factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1), BIAS_KEY, pim)
|
||||
factor = gtsam.ImuFactor(X(i), V(i), X(
|
||||
i + 1), V(i + 1), BIAS_KEY, pim)
|
||||
graph.push_back(factor)
|
||||
if True:
|
||||
print(factor)
|
||||
H2 = gtsam.OptionalJacobian96();
|
||||
print(pim.predict(actual_state_i, self.actualBias, H9, H2))
|
||||
print(pim.predict(actual_state_i, self.actualBias))
|
||||
pim.resetIntegration()
|
||||
actual_state_i = self.scenario.navState(t + self.dt)
|
||||
i += 1
|
||||
|
|
@ -94,30 +111,33 @@ class ImuFactorExample(PreintegrationExample):
|
|||
# add priors on beginning and end
|
||||
self.addPrior(0, graph)
|
||||
self.addPrior(num_poses - 1, graph)
|
||||
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
params.setVerbosityLM("SUMMARY")
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
|
||||
|
||||
# Calculate and print marginal covariances
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
|
||||
for i in range(num_poses):
|
||||
print("Covariance on pose {}:\n{}\n".format(i, marginals.marginalCovariance(X(i))))
|
||||
print("Covariance on vel {}:\n{}\n".format(i, marginals.marginalCovariance(V(i))))
|
||||
print("Covariance on pose {}:\n{}\n".format(
|
||||
i, marginals.marginalCovariance(X(i))))
|
||||
print("Covariance on vel {}:\n{}\n".format(
|
||||
i, marginals.marginalCovariance(V(i))))
|
||||
|
||||
# Plot resulting poses
|
||||
i = 0
|
||||
while result.exists(X(i)):
|
||||
pose_i = result.atPose3(X(i))
|
||||
plotPose3(POSES_FIG, pose_i, 0.1)
|
||||
plot_pose3(POSES_FIG, pose_i, 0.1)
|
||||
i += 1
|
||||
print(result.atConstantBias(BIAS_KEY))
|
||||
|
||||
print(result.atimuBias_ConstantBias(BIAS_KEY))
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
ImuFactorExample().run()
|
||||
|
|
@ -0,0 +1,176 @@
|
|||
"""
|
||||
iSAM2 example with ImuFactor.
|
||||
Author: Robert Truax (C++), Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
|
||||
def X(key):
|
||||
"""Create symbol for pose key."""
|
||||
return gtsam.symbol(ord('x'), key)
|
||||
|
||||
|
||||
def V(key):
|
||||
"""Create symbol for velocity key."""
|
||||
return gtsam.symbol(ord('v'), key)
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
"""Create 3d double numpy array."""
|
||||
return np.array([x, y, z], dtype=np.float)
|
||||
|
||||
|
||||
def ISAM2_plot(values, fignum=0):
|
||||
"""Plot poses."""
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca(projection='3d')
|
||||
plt.cla()
|
||||
|
||||
i = 0
|
||||
min_bounds = 0, 0, 0
|
||||
max_bounds = 0, 0, 0
|
||||
while values.exists(X(i)):
|
||||
pose_i = values.atPose3(X(i))
|
||||
gtsam_plot.plot_pose3(fignum, pose_i, 10)
|
||||
position = pose_i.translation().vector()
|
||||
min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)]
|
||||
max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)]
|
||||
# max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0
|
||||
i += 1
|
||||
|
||||
# draw
|
||||
axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1)
|
||||
axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1)
|
||||
axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1)
|
||||
plt.pause(1)
|
||||
|
||||
|
||||
# IMU preintegration parameters
|
||||
# Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis
|
||||
g = 9.81
|
||||
n_gravity = vector3(0, 0, -g)
|
||||
PARAMS = gtsam.PreintegrationParams.MakeSharedU(g)
|
||||
I = np.eye(3)
|
||||
PARAMS.setAccelerometerCovariance(I * 0.1)
|
||||
PARAMS.setGyroscopeCovariance(I * 0.1)
|
||||
PARAMS.setIntegrationCovariance(I * 0.1)
|
||||
PARAMS.setUse2ndOrderCoriolis(False)
|
||||
PARAMS.setOmegaCoriolis(vector3(0, 0, 0))
|
||||
|
||||
BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1)
|
||||
DELTA = gtsam.Pose3(gtsam.Rot3.Rodrigues(0, 0, 0),
|
||||
gtsam.Point3(0.05, -0.10, 0.20))
|
||||
|
||||
|
||||
def IMU_example():
|
||||
"""Run iSAM 2 example with IMU factor."""
|
||||
|
||||
# Start with a camera on x-axis looking at origin
|
||||
radius = 30
|
||||
up = gtsam.Point3(0, 0, 1)
|
||||
target = gtsam.Point3(0, 0, 0)
|
||||
position = gtsam.Point3(radius, 0, 0)
|
||||
camera = gtsam.SimpleCamera.Lookat(position, target, up, gtsam.Cal3_S2())
|
||||
pose_0 = camera.pose()
|
||||
|
||||
# Create the set of ground-truth landmarks and poses
|
||||
angular_velocity = math.radians(180) # rad/sec
|
||||
delta_t = 1.0/18 # makes for 10 degrees per step
|
||||
|
||||
angular_velocity_vector = vector3(0, -angular_velocity, 0)
|
||||
linear_velocity_vector = vector3(radius * angular_velocity, 0, 0)
|
||||
scenario = gtsam.ConstantTwistScenario(
|
||||
angular_velocity_vector, linear_velocity_vector, pose_0)
|
||||
|
||||
# Create a factor graph
|
||||
newgraph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Create (incremental) ISAM2 solver
|
||||
isam = gtsam.ISAM2()
|
||||
|
||||
# Create the initial estimate to the solution
|
||||
# Intentionally initialize the variables off from the ground truth
|
||||
initialEstimate = gtsam.Values()
|
||||
|
||||
# Add a prior on pose x0. This indirectly specifies where the origin is.
|
||||
# 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
|
||||
noise = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
|
||||
newgraph.push_back(gtsam.PriorFactorPose3(X(0), pose_0, noise))
|
||||
|
||||
# Add imu priors
|
||||
biasKey = gtsam.symbol(ord('b'), 0)
|
||||
biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
|
||||
biasprior = gtsam.PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(),
|
||||
biasnoise)
|
||||
newgraph.push_back(biasprior)
|
||||
initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias())
|
||||
velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
|
||||
|
||||
# Calculate with correct initial velocity
|
||||
n_velocity = vector3(0, angular_velocity * radius, 0)
|
||||
velprior = gtsam.PriorFactorVector(V(0), n_velocity, velnoise)
|
||||
newgraph.push_back(velprior)
|
||||
initialEstimate.insert(V(0), n_velocity)
|
||||
|
||||
accum = gtsam.PreintegratedImuMeasurements(PARAMS)
|
||||
|
||||
# Simulate poses and imu measurements, adding them to the factor graph
|
||||
for i in range(80):
|
||||
t = i * delta_t # simulation time
|
||||
if i == 0: # First time add two poses
|
||||
pose_1 = scenario.pose(delta_t)
|
||||
initialEstimate.insert(X(0), pose_0.compose(DELTA))
|
||||
initialEstimate.insert(X(1), pose_1.compose(DELTA))
|
||||
elif i >= 2: # Add more poses as necessary
|
||||
pose_i = scenario.pose(t)
|
||||
initialEstimate.insert(X(i), pose_i.compose(DELTA))
|
||||
|
||||
if i > 0:
|
||||
# Add Bias variables periodically
|
||||
if i % 5 == 0:
|
||||
biasKey += 1
|
||||
factor = gtsam.BetweenFactorConstantBias(
|
||||
biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE)
|
||||
newgraph.add(factor)
|
||||
initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias())
|
||||
|
||||
# Predict acceleration and gyro measurements in (actual) body frame
|
||||
nRb = scenario.rotation(t).matrix()
|
||||
bRn = np.transpose(nRb)
|
||||
measuredAcc = scenario.acceleration_b(t) - np.dot(bRn, n_gravity)
|
||||
measuredOmega = scenario.omega_b(t)
|
||||
accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t)
|
||||
|
||||
# Add Imu Factor
|
||||
imufac = gtsam.ImuFactor(
|
||||
X(i - 1), V(i - 1), X(i), V(i), biasKey, accum)
|
||||
newgraph.add(imufac)
|
||||
|
||||
# insert new velocity, which is wrong
|
||||
initialEstimate.insert(V(i), n_velocity)
|
||||
accum.resetIntegration()
|
||||
|
||||
# Incremental solution
|
||||
isam.update(newgraph, initialEstimate)
|
||||
result = isam.calculateEstimate()
|
||||
ISAM2_plot(result)
|
||||
|
||||
# reset
|
||||
newgraph = gtsam.NonlinearFactorGraph()
|
||||
initialEstimate.clear()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
IMU_example()
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Simple robot motion example, with prior and two odometry measurements
|
||||
Author: Frank Dellaert
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
# Create noise models
|
||||
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, PRIOR_NOISE))
|
||||
|
||||
# Add odometry factors
|
||||
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
|
||||
# For simplicity, we will use the same noise model for each odometry factor
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, ODOMETRY_NOISE))
|
||||
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, ODOMETRY_NOISE))
|
||||
print("\nFactor Graph:\n{}".format(graph))
|
||||
|
||||
# Create the data structure to hold the initialEstimate estimate to the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
|
||||
print("\nInitial Estimate:\n{}".format(initial))
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
# 5. Calculate and print marginal covariances for all variables
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
for i in range(1, 4):
|
||||
print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))
|
||||
|
||||
fig = plt.figure(0)
|
||||
for i in range(1, 4):
|
||||
gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))
|
||||
plt.axis('equal')
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,334 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Kinematics of three-link manipulator with GTSAM poses and product of exponential maps.
|
||||
Author: Frank Dellaert
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
import unittest
|
||||
from functools import reduce
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
from gtsam import Pose2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
def vector3(x, y, z):
|
||||
"""Create 3D double numpy array."""
|
||||
return np.array([x, y, z], dtype=np.float)
|
||||
|
||||
|
||||
def compose(*poses):
|
||||
"""Compose all Pose2 transforms given as arguments from left to right."""
|
||||
return reduce((lambda x, y: x.compose(y)), poses)
|
||||
|
||||
|
||||
def vee(M):
|
||||
"""Pose2 vee operator."""
|
||||
return vector3(M[0, 2], M[1, 2], M[1, 0])
|
||||
|
||||
|
||||
def delta(g0, g1):
|
||||
"""Difference between x,y,,theta components of SE(2) poses."""
|
||||
return vector3(g1.x() - g0.x(), g1.y() - g0.y(), g1.theta() - g0.theta())
|
||||
|
||||
|
||||
def trajectory(g0, g1, N=20):
|
||||
""" Create an interpolated trajectory in SE(2), treating x,y, and theta separately.
|
||||
g0 and g1 are the initial and final pose, respectively.
|
||||
N is the number of *intervals*
|
||||
Returns N+1 poses
|
||||
"""
|
||||
e = delta(g0, g1)
|
||||
return [Pose2(g0.x()+e[0]*t, g0.y()+e[1]*t, g0.theta()+e[2]*t) for t in np.linspace(0, 1, N)]
|
||||
|
||||
|
||||
class ThreeLinkArm(object):
|
||||
"""Three-link arm class."""
|
||||
|
||||
def __init__(self):
|
||||
self.L1 = 3.5
|
||||
self.L2 = 3.5
|
||||
self.L3 = 2.5
|
||||
self.xi1 = vector3(0, 0, 1)
|
||||
self.xi2 = vector3(self.L1, 0, 1)
|
||||
self.xi3 = vector3(self.L1+self.L2, 0, 1)
|
||||
self.sXt0 = Pose2(0, self.L1+self.L2 + self.L3, math.radians(90))
|
||||
|
||||
def fk(self, q):
|
||||
""" Forward kinematics.
|
||||
Takes numpy array of joint angles, in radians.
|
||||
"""
|
||||
sXl1 = Pose2(0, 0, math.radians(90))
|
||||
l1Zl1 = Pose2(0, 0, q[0])
|
||||
l1Xl2 = Pose2(self.L1, 0, 0)
|
||||
l2Zl2 = Pose2(0, 0, q[1])
|
||||
l2Xl3 = Pose2(self.L2, 0, 0)
|
||||
l3Zl3 = Pose2(0, 0, q[2])
|
||||
l3Xt = Pose2(self.L3, 0, 0)
|
||||
return compose(sXl1, l1Zl1, l1Xl2, l2Zl2, l2Xl3, l3Zl3, l3Xt)
|
||||
|
||||
def jacobian(self, q):
|
||||
""" Calculate manipulator Jacobian.
|
||||
Takes numpy array of joint angles, in radians.
|
||||
"""
|
||||
a = q[0]+q[1]
|
||||
b = a+q[2]
|
||||
return np.array([[-self.L1*math.cos(q[0]) - self.L2*math.cos(a)-self.L3*math.cos(b),
|
||||
-self.L1*math.cos(a)-self.L3*math.cos(b),
|
||||
- self.L3*math.cos(b)],
|
||||
[-self.L1*math.sin(q[0]) - self.L2*math.sin(a)-self.L3*math.sin(b),
|
||||
-self.L1*math.sin(a)-self.L3*math.sin(b),
|
||||
- self.L3*math.sin(b)],
|
||||
[1, 1, 1]], np.float)
|
||||
|
||||
def poe(self, q):
|
||||
""" Forward kinematics.
|
||||
Takes numpy array of joint angles, in radians.
|
||||
"""
|
||||
l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
|
||||
l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
|
||||
l3Zl3 = Pose2.Expmap(self.xi3 * q[2])
|
||||
return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
|
||||
|
||||
def con(self, q):
|
||||
""" Forward kinematics, conjugation form.
|
||||
Takes numpy array of joint angles, in radians.
|
||||
"""
|
||||
def expmap(x, y, theta):
|
||||
"""Implement exponential map via conjugation with axis (x,y)."""
|
||||
return compose(Pose2(x, y, 0), Pose2(0, 0, theta), Pose2(-x, -y, 0))
|
||||
|
||||
l1Zl1 = expmap(0.0, 0.0, q[0])
|
||||
l2Zl2 = expmap(0.0, self.L1, q[1])
|
||||
l3Zl3 = expmap(0.0, self.L1+self.L2, q[2])
|
||||
return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
|
||||
|
||||
def ik(self, sTt_desired, e=1e-9):
|
||||
""" Inverse kinematics.
|
||||
Takes desired Pose2 of tool T with respect to base S.
|
||||
Optional: mu, gradient descent rate; e: error norm threshold
|
||||
"""
|
||||
q = np.radians(vector3(30, -30, 45)) # well within workspace
|
||||
error = vector3(100, 100, 100)
|
||||
|
||||
while np.linalg.norm(error) > e:
|
||||
error = delta(sTt_desired, self.fk(q))
|
||||
J = self.jacobian(q)
|
||||
q -= np.dot(np.linalg.pinv(J), error)
|
||||
|
||||
# return result in interval [-pi,pi)
|
||||
return np.remainder(q+math.pi, 2*math.pi)-math.pi
|
||||
|
||||
def manipulator_jacobian(self, q):
    """ Calculate manipulator Jacobian.
    Takes numpy array of joint angles, in radians.
    Returns the manipulator Jacobian of differential twists. When multiplied with
    a vector of joint velocities, will yield a single differential twist which is
    the spatial velocity d(sTt)/dt * inv(sTt) of the end-effector pose.
    Just like always, differential twists can be hatted and multiplied with spatial
    coordinates of a point to give the spatial velocity of the point.
    """
    # Pose of each link frame due to the joints before it.
    l1Zl1 = Pose2.Expmap(self.xi1 * q[0])
    l2Zl2 = Pose2.Expmap(self.xi2 * q[1])
    # l3Zl3 = Pose2.Expmap(self.xi3 * q[2])

    # Column 1: the first screw axis expressed in the base frame.
    # (Adjoint of the identity is the identity, hence the direct use of xi1.)
    p1 = self.xi1
    # p1 = Pose2().Adjoint(self.xi1)

    # Column 2: second screw axis transported to the base frame by the
    # pose of link 1 (which depends only on q[0]).
    sTl1 = l1Zl1
    p2 = sTl1.Adjoint(self.xi2)

    # Column 3: third screw axis transported by the composed pose of
    # links 1 and 2 (depends on q[0] and q[1]).
    sTl2 = compose(l1Zl1, l2Zl2)
    p3 = sTl2.Adjoint(self.xi3)

    # Stack the three twists as columns of a 3x3 Jacobian.
    differential_twists = [p1, p2, p3]
    return np.stack(differential_twists, axis=1)
|
||||
|
||||
def plot(self, fignum, q):
    """ Plot arm.
    Takes figure number, and numpy array of joint angles, in radians.
    """
    fig = plt.figure(fignum)
    axes = fig.gca()

    # Base frame: rotated 90 degrees so the arm points "up" at rest.
    sXl1 = Pose2(0, 0, math.radians(90))
    t = sXl1.translation()
    p1 = np.array([t.x(), t.y()])
    gtsam_plot.plot_pose2_on_axes(axes, sXl1)

    def plot_line(p, g, color):
        # Draw a segment from 2D point p to the translation of pose g;
        # returns the endpoint so segments can be chained link by link.
        t = g.translation()
        q = np.array([t.x(), t.y()])
        line = np.append(p[np.newaxis], q[np.newaxis], axis=0)
        axes.plot(line[:, 0], line[:, 1], color)
        return q

    # Link 1: rotate by q[0], then translate along the link of length L1.
    l1Zl1 = Pose2(0, 0, q[0])
    l1Xl2 = Pose2(self.L1, 0, 0)
    sTl2 = compose(sXl1, l1Zl1, l1Xl2)
    p2 = plot_line(p1, sTl2, 'r-')
    gtsam_plot.plot_pose2_on_axes(axes, sTl2)

    # Link 2: rotate by q[1], then translate along L2.
    l2Zl2 = Pose2(0, 0, q[1])
    l2Xl3 = Pose2(self.L2, 0, 0)
    sTl3 = compose(sTl2, l2Zl2, l2Xl3)
    p3 = plot_line(p2, sTl3, 'g-')
    gtsam_plot.plot_pose2_on_axes(axes, sTl3)

    # Link 3: rotate by q[2], then translate along L3 to the tool frame.
    l3Zl3 = Pose2(0, 0, q[2])
    l3Xt = Pose2(self.L3, 0, 0)
    sTt = compose(sTl3, l3Zl3, l3Xt)
    plot_line(p3, sTt, 'b-')
    gtsam_plot.plot_pose2_on_axes(axes, sTt)
|
||||
|
||||
|
||||
# Create common example configurations.
# Q0: the rest configuration (all joint angles zero).
Q0 = vector3(0, 0, 0)
# Q1, Q2: two bent configurations, specified in degrees and converted to radians.
Q1 = np.radians(vector3(-30, -45, -90))
Q2 = np.radians(vector3(-90, 90, 0))
|
||||
|
||||
|
||||
class TestPose2SLAMExample(GtsamTestCase):
    """Unit tests for the ThreeLinkArm kinematics functions defined above."""

    def setUp(self):
        # Fresh arm instance for every test.
        self.arm = ThreeLinkArm()

    def assertPose2Equals(self, actual, expected, tol=1e-2):
        """Helper function that prints out actual and expected if not equal."""
        equal = actual.equals(expected, tol)
        if not equal:
            raise self.failureException(
                "Poses are not equal:\n{}!={}".format(actual, expected))

    def test_fk_arm(self):
        """Make sure forward kinematics is correct for some known test configurations."""
        # at rest: arm points straight up, total length L1 + L2 + L3 = 9.5
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.fk(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.fk(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_jacobian(self):
        """Test Jacobian calculation."""
        # NOTE: np.float was removed in NumPy 1.24; builtin float is used instead.
        # at rest
        expected = np.array([[-9.5, -6, -2.5], [0, 0, 0], [1, 1, 1]], float)
        J = self.arm.jacobian(Q0)
        np.testing.assert_array_almost_equal(J, expected)

        # at -90, 90, 0
        expected = np.array([[-6, -6, -2.5], [3.5, 0, 0], [1, 1, 1]], float)
        J = self.arm.jacobian(Q2)
        np.testing.assert_array_almost_equal(J, expected)

    def test_con_arm(self):
        """Make sure the conjugation form of forward kinematics matches fk."""
        # at rest
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.con(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.con(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_poe_arm(self):
        """Make sure POE is correct for some known test configurations."""
        # at rest
        expected = Pose2(0, 2*3.5 + 2.5, math.radians(90))
        sTt = self.arm.poe(Q0)
        self.assertIsInstance(sTt, Pose2)
        self.assertPose2Equals(sTt, expected)

        # -30, -45, -90
        expected = Pose2(5.78, 1.52, math.radians(-75))
        sTt = self.arm.poe(Q1)
        self.assertPose2Equals(sTt, expected)

    def test_ik(self):
        """Check iterative inverse kinematics function."""
        # at rest
        actual = self.arm.ik(Pose2(0, 2*3.5 + 2.5, math.radians(90)))
        np.testing.assert_array_almost_equal(actual, Q0, decimal=2)

        # -30, -45, -90: ik should recover a configuration whose fk matches
        sTt_desired = Pose2(5.78, 1.52, math.radians(-75))
        actual = self.arm.ik(sTt_desired)
        self.assertPose2Equals(self.arm.fk(actual), sTt_desired)
        np.testing.assert_array_almost_equal(actual, Q1, decimal=2)

    def test_manipulator_jacobian(self):
        """Test manipulator (spatial) Jacobian calculation."""
        # at rest
        expected = np.array([[0, 3.5, 7], [0, 0, 0], [1, 1, 1]], float)
        J = self.arm.manipulator_jacobian(Q0)
        np.testing.assert_array_almost_equal(J, expected)

        # at -90, 90, 0
        expected = np.array(
            [[0, 0, 3.5], [0, -3.5, -3.5], [1, 1, 1]], float)
        J = self.arm.manipulator_jacobian(Q2)
        np.testing.assert_array_almost_equal(J, expected)
|
||||
|
||||
|
||||
def run_example():
    """ Use trajectory interpolation and then trajectory tracking a la Murray
    to move a 3-link arm on a straight line.
    """
    # Create arm
    arm = ThreeLinkArm()

    # Get initial pose using forward kinematics
    q = np.radians(vector3(30, -30, 45))
    sTt_initial = arm.fk(q)

    # Create interpolated trajectory in task space to desired goal pose
    sTt_goal = Pose2(2.4, 4.3, math.radians(0))
    poses = trajectory(sTt_initial, sTt_goal, 50)

    # Setup figure and plot initial pose
    fignum = 0
    fig = plt.figure(fignum)
    axes = fig.gca()
    axes.set_xlim(-5, 5)
    axes.set_ylim(0, 10)
    gtsam_plot.plot_pose2(fignum, arm.fk(q))

    # For all poses in interpolated trajectory, calculate dq to move to next pose.
    # We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose).
    for pose in poses:
        sTt = arm.fk(q)
        error = delta(sTt, pose)
        J = arm.jacobian(q)
        # Resolved-rate step: joint update from the pose error.
        q += np.dot(np.linalg.inv(J), error)
        arm.plot(fignum, q)
        plt.pause(0.01)

    # Keep the final figure on screen for a while before returning.
    plt.pause(10)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the interactive demo first, then the unit tests.
    # NOTE(review): unittest.main() parses sys.argv and exits the process;
    # it only runs after run_example() finishes its plotting pauses.
    run_example()
    unittest.main()
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Simple robotics example using odometry measurements and bearing-range (laser) measurements
|
||||
Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
# Create noise models (sigmas: x, y, theta for poses; bearing, range for landmarks)
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2]))

# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()

# Create the keys corresponding to unknown variables in the factor graph:
# 'x' symbols are robot poses, 'l' symbols are landmarks.
X1 = gtsam.symbol(ord('x'), 1)
X2 = gtsam.symbol(ord('x'), 2)
X3 = gtsam.symbol(ord('x'), 3)
L1 = gtsam.symbol(ord('l'), 4)
L2 = gtsam.symbol(ord('l'), 5)

# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

# Add odometry factors between X1,X2 and X2,X3, respectively
graph.add(gtsam.BetweenFactorPose2(
    X1, X2, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(
    X2, X3, gtsam.Pose2(2.0, 0.0, 0.0), ODOMETRY_NOISE))

# Add Range-Bearing measurements to two different landmarks L1 and L2
graph.add(gtsam.BearingRangeFactor2D(
    X1, L1, gtsam.Rot2.fromDegrees(45), np.sqrt(4.0+4.0), MEASUREMENT_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
    X2, L1, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))
graph.add(gtsam.BearingRangeFactor2D(
    X3, L2, gtsam.Rot2.fromDegrees(90), 2.0, MEASUREMENT_NOISE))

# Print graph
print("Factor Graph:\n{}".format(graph))

# Create (deliberately inaccurate) initial estimate
initial_estimate = gtsam.Values()
initial_estimate.insert(X1, gtsam.Pose2(-0.25, 0.20, 0.15))
initial_estimate.insert(X2, gtsam.Pose2(2.30, 0.10, -0.20))
initial_estimate.insert(X3, gtsam.Pose2(4.10, 0.10, 0.10))
initial_estimate.insert(L1, gtsam.Point2(1.80, 2.10))
initial_estimate.insert(L2, gtsam.Point2(4.10, 1.80))

# Print the initial estimate
print("Initial Estimate:\n{}".format(initial_estimate))

# Optimize using Levenberg-Marquardt optimization. The optimizer
# accepts an optional set of configuration parameters, controlling
# things like convergence criteria, the type of linear system solver
# to use, and the amount of information displayed during optimization.
# Here we will use the default set of parameters. See the
# documentation for the full set of parameters.
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial_estimate, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))
|
||||
|
||||
# Calculate and print marginal covariances for all variables.
marginals = gtsam.Marginals(graph, result)
# NOTE: loop variable renamed from `str`, which shadowed the builtin.
for (key, label) in [(X1, "X1"), (X2, "X2"), (X3, "X3"), (L1, "L1"), (L2, "L2")]:
    print("{} covariance:\n{}\n".format(label, marginals.marginalCovariance(key)))
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Simple robotics example using odometry measurements and bearing-range (laser) measurements
|
||||
Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
|
||||
|
||||
def vector3(x, y, z):
    """Create a 3d double numpy array from the three components.

    NOTE: np.float was removed in NumPy 1.24; use the builtin float instead.
    """
    return np.array([x, y, z], dtype=float)
|
||||
|
||||
|
||||
# Create noise models (sigmas on x, y, theta)
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1))
ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1))

# 1. Create a factor graph container and add factors to it
graph = gtsam.NonlinearFactorGraph()

# 2a. Add a prior on the first pose, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))

# 2b. Add odometry factors
# Create odometry (Between) factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(
    2, 3, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(
    3, 4, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
graph.add(gtsam.BetweenFactorPose2(
    4, 5, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))

# 2c. Add the loop closure constraint
# This factor encodes the fact that we have returned to the same pose. In real
# systems, these constraints may be identified in many ways, such as appearance-based
# techniques with camera images. We will use another Between Factor to enforce this constraint:
graph.add(gtsam.BetweenFactorPose2(
    5, 2, gtsam.Pose2(2, 0, math.pi / 2), ODOMETRY_NOISE))
print("\nFactor Graph:\n{}".format(graph))  # print

# 3. Create the data structure to hold the initial_estimate estimate to the
# solution. For illustrative purposes, these have been deliberately set to incorrect values
initial_estimate = gtsam.Values()
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial_estimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial_estimate.insert(3, gtsam.Pose2(4.1, 0.1, math.pi / 2))
initial_estimate.insert(4, gtsam.Pose2(4.0, 2.0, math.pi))
initial_estimate.insert(5, gtsam.Pose2(2.1, 2.1, -math.pi / 2))
print("\nInitial Estimate:\n{}".format(initial_estimate))  # print

# 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
# The optimizer accepts an optional set of configuration parameters,
# controlling things like convergence criteria, the type of linear
# system solver to use, and the amount of information displayed during
# optimization. We will set a few parameters as a demonstration.
parameters = gtsam.GaussNewtonParams()

# Stop iterating once the change in error between steps is less than this value
parameters.setRelativeErrorTol(1e-5)
# Do not perform more than N iteration steps
parameters.setMaxIterations(100)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graph, initial_estimate, parameters)
# ... and optimize
result = optimizer.optimize()
print("Final Result:\n{}".format(result))

# 5. Calculate and print marginal covariances for all variables
marginals = gtsam.Marginals(graph, result)
for i in range(1, 6):
    print("X{} covariance:\n{}\n".format(i, marginals.marginalCovariance(i)))

# Plot each optimized pose with its marginal covariance ellipse.
fig = plt.figure(0)
for i in range(1, 6):
    gtsam_plot.plot_pose2(0, result.atPose2(i), 0.5, marginals.marginalCovariance(i))

plt.axis('equal')
plt.show()
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
A 2D Pose SLAM example that reads input from g2o, converts it to a factor graph
|
||||
and does the optimization. Output is written on a file, in g2o format
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
import argparse
|
||||
import math
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils import plot
|
||||
|
||||
|
||||
def vector3(x, y, z):
    """Create a 3d double numpy array from the three components.

    NOTE: np.float was removed in NumPy 1.24; use the builtin float instead.
    """
    return np.array([x, y, z], dtype=float)
|
||||
|
||||
|
||||
# Command-line interface for the 2D g2o SLAM example.
parser = argparse.ArgumentParser(
    description="A 2D Pose SLAM example that reads input from g2o, "
                "converts it to a factor graph and does the optimization. "
                "Output is written on a file, in g2o format")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument('-o', '--output',
                    help="the path to the output file with optimized graph")
parser.add_argument('-m', '--maxiter', type=int,
                    help="maximum number of iterations for optimizer")
parser.add_argument('-k', '--kernel', choices=['none', 'huber', 'tukey'],
                    default="none", help="Type of kernel used")
parser.add_argument("-p", "--plot", action="store_true",
                    help="Flag to plot results")
args = parser.parse_args()

# Fall back to the bundled example dataset when no input file is given.
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") if args.input is None\
    else args.input

maxIterations = 100 if args.maxiter is None else args.maxiter

is3D = False

graph, initial = gtsam.readG2o(g2oFile, is3D)

# Robust kernels are accepted on the command line but not wired up yet.
assert args.kernel == "none", "Supplied kernel type is not yet implemented"

# Add prior on the pose having index (key) = 0 to anchor the graph
graphWithPrior = graph
priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8))
graphWithPrior.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))

params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
# ... and optimize
result = optimizer.optimize()

print("Optimization complete")
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))


# Either print the result, or write it back out in g2o format.
if args.output is None:
    print("\nFactor Graph:\n{}".format(graph))
    print("\nInitial Estimate:\n{}".format(initial))
    print("Final Result:\n{}".format(result))
else:
    outputFile = args.output
    print("Writing results to file: ", outputFile)
    # Re-read the graph without the added prior before writing.
    graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
    gtsam.writeG2o(graphNoKernel, result, outputFile)
    print ("Done!")

# Optionally plot the optimized 2D poses.
if args.plot:
    resultPoses = gtsam.extractPose2(result)
    for i in range(resultPoses.shape[0]):
        plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
    plt.show()
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
"""
|
||||
* @file Pose3SLAMExample_initializePose3.cpp
|
||||
* @brief A 3D Pose SLAM example that reads input from g2o, and initializes the
|
||||
* Pose3 using InitializePose3
|
||||
* @date Jan 17, 2019
|
||||
* @author Vikrant Shah based on CPP example by Luca Carlone
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
import argparse
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils import plot
|
||||
|
||||
|
||||
def vector6(x, y, z, a, b, c):
    """Create a 6d double numpy array from the six components.

    NOTE: np.float was removed in NumPy 1.24; use the builtin float instead.
    """
    return np.array([x, y, z, a, b, c], dtype=float)
|
||||
|
||||
|
||||
# Command-line interface for the 3D g2o SLAM example.
parser = argparse.ArgumentParser(
    description="A 3D Pose SLAM example that reads input from g2o, and "
                "initializes Pose3")
parser.add_argument('-i', '--input', help='input file g2o format')
parser.add_argument('-o', '--output',
                    help="the path to the output file with optimized graph")
parser.add_argument("-p", "--plot", action="store_true",
                    help="Flag to plot results")
args = parser.parse_args()

# Fall back to the bundled example dataset when no input file is given.
g2oFile = gtsam.findExampleDataFile("pose3example.txt") if args.input is None \
    else args.input

is3D = True
graph, initial = gtsam.readG2o(g2oFile, is3D)

# Add Prior on the first key to anchor the graph
priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6,
                                                         1e-4, 1e-4, 1e-4))

print("Adding prior to g2o file ")
graphWithPrior = graph
firstKey = initial.keys().at(0)
graphWithPrior.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))

params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")  # this will show info about stopping conds
optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
result = optimizer.optimize()
print("Optimization complete")

print("initial error = ", graphWithPrior.error(initial))
print("final error = ", graphWithPrior.error(result))

# Either print the result, or write it back out in g2o format.
if args.output is None:
    print("Final Result:\n{}".format(result))
else:
    outputFile = args.output
    print("Writing results to file: ", outputFile)
    # Re-read the graph without the added prior before writing.
    graphNoKernel, _ = gtsam.readG2o(g2oFile, is3D)
    gtsam.writeG2o(graphNoKernel, result, outputFile)
    print ("Done!")

# Optionally plot the optimized 3D poses.
if args.plot:
    resultPoses = gtsam.allPose3s(result)
    for i in range(resultPoses.size()):
        plot.plot_pose3(1, resultPoses.atPose3(i))
    plt.show()
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Initialize PoseSLAM with Chordal init
|
||||
Author: Luca Carlone, Frank Dellaert (python port)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
# Read graph from the bundled example g2o file.
g2oFile = gtsam.findExampleDataFile("pose3example.txt")

is3D = True
graph, initial = gtsam.readG2o(g2oFile, is3D)

# Add a prior on the first key to anchor the graph.
priorModel = gtsam.noiseModel_Diagonal.Variances(
    np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4]))
firstKey = initial.keys().at(0)
# BUG FIX: the prior was previously added on the hard-coded key 0, leaving
# the computed firstKey unused; anchor on the actual first key instead.
graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))

# Initializing Pose3 - chordal relaxation
initialization = gtsam.InitializePose3.initialize(graph)

print(initialization)
|
||||
|
|
@ -1,19 +1,26 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
A script validating the Preintegration of IMU measurements
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam_utils import plotPose3
|
||||
from gtsam.utils.plot import plot_pose3
|
||||
|
||||
IMU_FIG = 1
|
||||
POSES_FIG = 2
|
||||
|
||||
|
||||
class PreintegrationExample(object):
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -22,18 +29,21 @@ class PreintegrationExample(object):
|
|||
params = gtsam.PreintegrationParams.MakeSharedU(g)
|
||||
kGyroSigma = math.radians(0.5) / 60 # 0.5 degree ARW
|
||||
kAccelSigma = 0.1 / 60 # 10 cm VRW
|
||||
params.gyroscopeCovariance = kGyroSigma ** 2 * np.identity(3, np.float)
|
||||
params.accelerometerCovariance = kAccelSigma ** 2 * np.identity(3, np.float)
|
||||
params.integrationCovariance = 0.0000001 ** 2 * np.identity(3, np.float)
|
||||
params.setGyroscopeCovariance(
|
||||
kGyroSigma ** 2 * np.identity(3, np.float))
|
||||
params.setAccelerometerCovariance(
|
||||
kAccelSigma ** 2 * np.identity(3, np.float))
|
||||
params.setIntegrationCovariance(
|
||||
0.0000001 ** 2 * np.identity(3, np.float))
|
||||
return params
|
||||
|
||||
def __init__(self, twist=None, bias=None, dt=1e-2):
|
||||
"""Initialize with given twist, a pair(angularVelocityVector, velocityVector)."""
|
||||
|
||||
|
||||
# setup interactive plotting
|
||||
plt.ion()
|
||||
|
||||
# Setup loop as default scenario
|
||||
# Setup loop as default scenario
|
||||
if twist is not None:
|
||||
(W, V) = twist
|
||||
else:
|
||||
|
|
@ -52,21 +62,21 @@ class PreintegrationExample(object):
|
|||
# Create runner
|
||||
self.g = 10 # simple gravity constant
|
||||
self.params = self.defaultParams(self.g)
|
||||
ptr = gtsam.ScenarioPointer(self.scenario)
|
||||
|
||||
if bias is not None:
|
||||
self.actualBias = bias
|
||||
else:
|
||||
accBias = np.array([0, 0.1, 0])
|
||||
gyroBias = np.array([0, 0, 0])
|
||||
self.actualBias = gtsam.ConstantBias(accBias, gyroBias)
|
||||
self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias)
|
||||
|
||||
self.runner = gtsam.ScenarioRunner(ptr, self.params, self.dt, self.actualBias)
|
||||
self.runner = gtsam.ScenarioRunner(
|
||||
self.scenario, self.params, self.dt, self.actualBias)
|
||||
|
||||
def plotImu(self, t, measuredOmega, measuredAcc):
|
||||
plt.figure(IMU_FIG)
|
||||
|
||||
# plot angular velocity
|
||||
# plot angular velocity
|
||||
omega_b = self.scenario.omega_b(t)
|
||||
for i, (label, color) in enumerate(zip(self.labels, self.colors)):
|
||||
plt.subplot(4, 3, i + 1)
|
||||
|
|
@ -99,9 +109,9 @@ class PreintegrationExample(object):
|
|||
def plotGroundTruthPose(self, t):
|
||||
# plot ground truth pose, as well as prediction from integrated IMU measurements
|
||||
actualPose = self.scenario.pose(t)
|
||||
plotPose3(POSES_FIG, actualPose, 0.3)
|
||||
plot_pose3(POSES_FIG, actualPose, 0.3)
|
||||
t = actualPose.translation()
|
||||
self.maxDim = max([abs(t[0]), abs(t[1]), abs(t[2]), self.maxDim])
|
||||
self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim])
|
||||
ax = plt.gca()
|
||||
ax.set_xlim3d(-self.maxDim, self.maxDim)
|
||||
ax.set_ylim3d(-self.maxDim, self.maxDim)
|
||||
|
|
@ -120,10 +130,11 @@ class PreintegrationExample(object):
|
|||
self.plotGroundTruthPose(t)
|
||||
pim = self.runner.integrate(t, self.actualBias, True)
|
||||
predictedNavState = self.runner.predict(pim, self.actualBias)
|
||||
plotPose3(POSES_FIG, predictedNavState.pose(), 0.1)
|
||||
plot_pose3(POSES_FIG, predictedNavState.pose(), 0.1)
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
PreintegrationExample().run()
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
These examples are almost identical to the old handwritten python wrapper
|
||||
examples. However, there are just some slight name changes, for example
|
||||
`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc...
|
||||
Also, annoyingly, instead of `gtsam.Symbol('b', 0)` we now need to say `gtsam.symbol(ord('b'), 0))`
|
||||
|
||||
# Porting Progress
|
||||
|
||||
| C++ Example Name | Ported |
|
||||
|-------------------------------------------------------|--------|
|
||||
| CameraResectioning | |
|
||||
| CreateSFMExampleData | |
|
||||
| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython |
|
||||
| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython |
|
||||
| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython |
|
||||
| ImuFactorExample2 | X |
|
||||
| ImuFactorsExample | |
|
||||
| ISAM2Example_SmartFactor | |
|
||||
| ISAM2_SmartFactorStereo_IMU | |
|
||||
| LocalizationExample | impossible? |
|
||||
| METISOrderingExample | |
|
||||
| OdometryExample | X |
|
||||
| PlanarSLAMExample | X |
|
||||
| Pose2SLAMExample | X |
|
||||
| Pose2SLAMExampleExpressions | |
|
||||
| Pose2SLAMExample_g2o | X |
|
||||
| Pose2SLAMExample_graph | |
|
||||
| Pose2SLAMExample_graphviz | |
|
||||
| Pose2SLAMExample_lago | lago not exposed through cython |
|
||||
| Pose2SLAMStressTest | |
|
||||
| Pose2SLAMwSPCG | |
|
||||
| Pose3SLAMExample_changeKeys | |
|
||||
| Pose3SLAMExampleExpressions_BearingRangeWithTransform | |
|
||||
| Pose3SLAMExample_g2o | X |
|
||||
| Pose3SLAMExample_initializePose3Chordal | |
|
||||
| Pose3SLAMExample_initializePose3Gradient | |
|
||||
| RangeISAMExample_plaza2 | |
|
||||
| SelfCalibrationExample | |
|
||||
| SFMExample_bal_COLAMD_METIS | |
|
||||
| SFMExample_bal | |
|
||||
| SFMExample | X |
|
||||
| SFMExampleExpressions_bal | |
|
||||
| SFMExampleExpressions | |
|
||||
| SFMExample_SmartFactor | |
|
||||
| SFMExample_SmartFactorPCG | |
|
||||
| SimpleRotation | X |
|
||||
| SolverComparer | |
|
||||
| StereoVOExample | |
|
||||
| StereoVOExample_large | |
|
||||
| TimeTBB | |
|
||||
| UGM_chain | discrete functionality not exposed |
|
||||
| UGM_small | discrete functionality not exposed |
|
||||
| VisualISAM2Example | X |
|
||||
| VisualISAMExample | X |
|
||||
|
||||
Extra Examples (with no C++ equivalent)
|
||||
- PlanarManipulatorExample
|
||||
- SFMData
|
||||
|
|
@ -0,0 +1,118 @@
|
|||
"""
|
||||
GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
A structure-from-motion problem on a simulated dataset
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
from gtsam.examples import SFMdata
|
||||
from gtsam.gtsam import (Cal3_S2, DoglegOptimizer,
|
||||
GenericProjectionFactorCal3_S2, NonlinearFactorGraph,
|
||||
Point3, Pose3, PriorFactorPoint3, PriorFactorPose3,
|
||||
Rot3, SimpleCamera, Values)
|
||||
|
||||
|
||||
def symbol(name: str, index: int) -> int:
    """Create a gtsam symbol from a one-character name and an index.

    Saves callers from explicitly casting 'name' from str to its ordinal.
    """
    char_code = ord(name)
    return gtsam.symbol(char_code, index)
|
||||
|
||||
|
||||
def main():
    """A structure-from-motion problem on a simulated dataset.

    Camera observations of landmarks (pixel coordinates, Point2) are modelled
    with GenericProjectionFactorCal3_S2 factors collected in a
    NonlinearFactorGraph. A prior on the first pose fixes the origin, and —
    because SfM has an inherent scale ambiguity — a prior on the first
    landmark fixes the scale. The graph is then optimized with Powell's
    Dogleg, a trust-region method, starting from an initial estimate that is
    deliberately perturbed away from the ground truth. (The solvers are
    iterative: they relinearize around the current estimate until
    convergence, which is why every variable needs an initial value in a
    Values container.)
    """
    # Camera calibration: fx, fy, skew, principal point.
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Isotropic measurement noise: one pixel in u and v.
    measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)

    # Ground-truth landmarks and poses from the simulated dataset.
    points = SFMdata.createPoints()
    poses = SFMdata.createPoses(K)

    graph = NonlinearFactorGraph()

    # Prior on the first pose anchors the origin:
    # 0.3 rad std on roll/pitch/yaw and 0.1 m on x/y/z.
    pose_noise = gtsam.noiseModel_Diagonal.Sigmas(
        np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
    graph.push_back(PriorFactorPose3(symbol('x', 0), poses[0], pose_noise))

    # Simulated projection measurements from every camera pose.
    for i, pose in enumerate(poses):
        camera = SimpleCamera(pose, K)
        for j, point in enumerate(points):
            measurement = camera.project(point)
            graph.push_back(GenericProjectionFactorCal3_S2(
                measurement, measurement_noise, symbol('x', i), symbol('l', j), K))

    # A prior on the first landmark fixes the scale: it pins the distance
    # between the first camera and the first landmark, and every other
    # landmark position is interpreted relative to that scale.
    point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
    graph.push_back(PriorFactorPoint3(symbol('l', 0), points[0], point_noise))
    graph.print_('Factor Graph:\n')

    # Initial estimate, intentionally offset from the ground truth.
    initial_estimate = Values()
    for i, pose in enumerate(poses):
        perturbation = Pose3(Rot3.Rodrigues(-0.1, 0.2, 0.25),
                             Point3(0.05, -0.10, 0.20))
        initial_estimate.insert(symbol('x', i), pose.compose(perturbation))
    for j, point in enumerate(points):
        offset_point = Point3(point.vector() + np.array([-0.25, 0.20, 0.15]))
        initial_estimate.insert(symbol('l', j), offset_point)
    initial_estimate.print_('Initial Estimates:\n')

    # Optimize with Dogleg and report the error before/after.
    params = gtsam.DoglegParams()
    params.setVerbosity('TERMINATION')
    optimizer = DoglegOptimizer(graph, initial_estimate, params)
    print('Optimizing:')
    result = optimizer.optimize()
    result.print_('Final results:\n')
    print('initial error = {}'.format(graph.error(initial_estimate)))
    print('final error = {}'.format(graph.error(result)))


if __name__ == '__main__':
    main()
||||
|
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
A structure-from-motion example with landmarks
|
||||
- The landmarks form a 10 meter cube
|
||||
- The robot rotates around the landmarks, always facing towards the cube
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
|
||||
def createPoints():
    """Return the eight cube corners (at coordinates +/-10) used as
    ground-truth landmarks, ordered top face first, counter-clockwise."""
    corner_xy = [(10.0, 10.0), (-10.0, 10.0), (-10.0, -10.0), (10.0, -10.0)]
    return [gtsam.Point3(x, y, z)
            for z in (10.0, -10.0)
            for (x, y) in corner_xy]
||||
|
||||
|
||||
def createPoses(K):
    """Return ground-truth camera poses on a 30 m circle around the origin.

    Eight cameras are evenly spaced on a circle in the z=0 plane, each one
    looking at the origin with +z as the 'up' direction; K is the shared
    camera calibration used to construct the Lookat cameras.
    """
    radius = 30.0
    up = gtsam.Point3(0, 0, 1)
    target = gtsam.Point3(0, 0, 0)
    poses = []
    for theta in np.linspace(0, 2 * np.pi, 8, endpoint=False):
        eye = gtsam.Point3(radius * np.cos(theta),
                           radius * np.sin(theta), 0.0)
        camera = gtsam.PinholeCameraCal3_S2.Lookat(eye, target, up, K)
        poses.append(camera.pose())
    return poses
||||
|
|
@ -0,0 +1,84 @@
|
|||
"""
|
||||
GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
This example will perform a relatively trivial optimization on
|
||||
a single variable with a single factor.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import gtsam
|
||||
|
||||
|
||||
def main():
    """Trivial optimization: a single Rot2 variable with one prior factor."""
    # Step 1: create a unary "prior" factor. The prior here is a sensor
    # measurement (a 30 degree rotation) together with a noise model
    # (1 degree isotropic). The key is a unique label associating the
    # variable with factors that act on it. A factor generally needs:
    #   - key(s) for the variables it constrains,
    #   - a measurement value,
    #   - a measurement model of matching dimensionality.
    prior = gtsam.Rot2.fromAngle(np.deg2rad(30))
    prior.print_('goal angle')
    model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1))
    key = gtsam.symbol(ord('x'), 1)
    factor = gtsam.PriorFactorRot2(key, prior, model)

    # Step 2: add the factor to a graph container, which holds the full
    # system of constraints. Here there is only one factor; a practical
    # problem would add many more.
    graph = gtsam.NonlinearFactorGraph()
    graph.push_back(factor)
    graph.print_('full graph')

    # Step 3: build an initial estimate. Values maps keys to concrete
    # variable values (like a dictionary) and serves as the linearization
    # point, so every variable in the graph needs an entry.
    initial = gtsam.Values()
    initial.insert(key, gtsam.Rot2.fromAngle(np.deg2rad(20)))
    initial.print_('initial estimate')

    # Step 4: optimize. The optimizer returns a new Values holding the
    # final state.
    result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
    result.print_('final result')


if __name__ == '__main__':
    main()
||||
|
|
@ -0,0 +1,163 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
An example of running visual SLAM using iSAM2.
|
||||
Author: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.plot as gtsam_plot
|
||||
from gtsam.examples import SFMdata
|
||||
|
||||
|
||||
def X(i):
    """Return the integer key for pose number i (symbol 'x' + index)."""
    return int(gtsam.symbol(ord('x'), i))
||||
|
||||
|
||||
def L(j):
    """Return the integer key for landmark number j (symbol 'l' + index)."""
    return int(gtsam.symbol(ord('l'), j))
||||
|
||||
|
||||
def visual_ISAM2_plot(result):
    """Plot the current ISAM2 estimate: 3D landmark points and camera poses.

    Author: Ellon Paiva. Based on the MATLAB version by Duy Nguyen Ta and
    Frank Dellaert.
    """
    # Reuse a fixed figure id so successive calls update the same window.
    fignum = 0

    fig = plt.figure(fignum)
    axes = fig.gca(projection='3d')
    plt.cla()

    # Landmark means only. (Covariances could come from Marginals, but the
    # current frame might not observe every point, so they are skipped:
    #   marginals = Marginals(isam.getFactorsUnsafe(), isam.calculateEstimate())
    #   gtsam.plot_3d_points(result, [], marginals) )
    gtsam_plot.plot_3d_points(fignum, result, 'rx')

    # Cameras: walk pose keys X(0), X(1), ... until one is missing.
    pose_index = 0
    while result.exists(X(pose_index)):
        gtsam_plot.plot_pose3(fignum, result.atPose3(X(pose_index)), 10)
        pose_index += 1

    # Fix the axes and give the GUI a moment to draw.
    axes.set_xlim3d(-40, 40)
    axes.set_ylim3d(-40, 40)
    axes.set_zlim3d(-40, 40)
    plt.pause(1)
||||
|
||||
|
||||
def visual_ISAM2_example():
    """Run incremental visual SLAM on simulated data using iSAM2."""
    plt.ion()

    # Camera calibration and per-pixel isotropic measurement noise
    # (one pixel in u and v).
    K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)
    measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)

    # Ground-truth landmarks and poses.
    points = SFMdata.createPoints()
    poses = SFMdata.createPoses(K)

    # Unlike iSAM1, which needs periodic batch steps for relinearization and
    # variable ordering, iSAM2 partially relinearizes/reorders at every step.
    # A small relinearization threshold makes the incremental result approach
    # the batch solution.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.01)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # New factors and initial values are staged here before each update.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Add the observations to iSAM incrementally, one pose at a time.
    for i, pose in enumerate(poses):

        # One projection factor per landmark observed from this pose.
        for j, point in enumerate(points):
            camera = gtsam.PinholeCameraCal3_S2(pose, K)
            measurement = camera.project(point)
            graph.push_back(gtsam.GenericProjectionFactorCal3_S2(
                measurement, measurement_noise, X(i), L(j), K))

        # Initial guess for the current pose, intentionally offset from truth.
        initial_estimate.insert(X(i), pose.compose(gtsam.Pose3(
            gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))

        if i == 0:
            # First iteration: priors fix the coordinate frame (pose x0) and
            # the scale (landmark l0). iSAM requires each variable to be
            # observed at least twice before it can be added, so no update
            # runs yet.
            pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array(
                [0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))  # 0.3 rad on roll,pitch,yaw; 0.1 m on x,y,z
            graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise))

            point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
            graph.push_back(gtsam.PriorFactorPoint3(
                L(0), points[0], point_noise))  # add directly to graph

            # Initial guesses for all observed landmarks, again intentionally
            # offset from the ground truth.
            for j, point in enumerate(points):
                initial_estimate.insert(L(j), gtsam.Point3(
                    point.x() - 0.25, point.y() + 0.20, point.z() + 0.15))
        else:
            # Fold the staged factors into iSAM. Each update() performs one
            # iteration of the nonlinear solver; calling it again trades time
            # for accuracy.
            isam.update(graph, initial_estimate)
            isam.update()
            current_estimate = isam.calculateEstimate()
            print("****************************************************")
            print("Frame", i, ":")
            for j in range(i + 1):
                print(X(j), ":", current_estimate.atPose3(X(j)))
            for j in range(len(points)):
                print(L(j), ":", current_estimate.atPoint3(L(j)))

            visual_ISAM2_plot(current_estimate)

            # Empty the staging containers for the next iteration.
            graph.resize(0)
            initial_estimate.clear()

    plt.ioff()
    plt.show()


if __name__ == '__main__':
    visual_ISAM2_example()
||||
|
|
@ -0,0 +1,106 @@
|
|||
"""
|
||||
GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
A visualSLAM example for the structure-from-motion problem on a simulated dataset
|
||||
This version uses iSAM to solve the problem incrementally
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import gtsam
|
||||
from gtsam.examples import SFMdata
|
||||
from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2,
|
||||
NonlinearFactorGraph, NonlinearISAM, Point3, Pose3,
|
||||
PriorFactorPoint3, PriorFactorPose3, Rot3,
|
||||
SimpleCamera, Values)
|
||||
|
||||
|
||||
def symbol(name: str, index: int) -> int:
    """Build a gtsam symbol key from a one-character name and an index.

    Spares callers from casting 'name' from str to int themselves.
    """
    return gtsam.symbol(ord(name), index)
||||
|
||||
|
||||
def main():
    """Incremental structure-from-motion with NonlinearISAM.

    The landmarks form a cube and the robot circles the landmarks, always
    facing towards the cube, adding observations to iSAM one pose at a time.
    """
    # Camera calibration and one-pixel isotropic observation noise (u and v).
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)
    camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)

    # Ground-truth landmarks and poses.
    points = SFMdata.createPoints()
    poses = SFMdata.createPoses(K)

    # NonlinearISAM relinearizes and reorders variables every
    # 'reorderInterval' updates.
    isam = NonlinearISAM(reorderInterval=3)

    # New factors and initial values are staged here before each update.
    graph = NonlinearFactorGraph()
    initial_estimate = Values()

    # Add the observations to iSAM incrementally, one pose at a time.
    for i, pose in enumerate(poses):
        camera = SimpleCamera(pose, K)
        # One projection factor per observed landmark.
        for j, point in enumerate(points):
            measurement = camera.project(point)
            graph.push_back(GenericProjectionFactorCal3_S2(
                measurement, camera_noise, symbol('x', i), symbol('l', j), K))

        # Initial guess for the current pose, intentionally offset from truth.
        perturbation = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25),
                             t=Point3(0.05, -0.10, 0.20))
        initial_estimate.insert(symbol('x', i), pose.compose(perturbation))

        if i == 0:
            # First iteration: priors fix the coordinate frame (pose x0) and
            # the scale (landmark l0). iSAM requires each variable to be
            # observed at least twice before adding it, so no update runs yet.
            # Pose prior: 0.3 rad std on roll,pitch,yaw and 0.1 m on x,y,z.
            pose_noise = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
            graph.push_back(PriorFactorPose3(symbol('x', 0), poses[0], pose_noise))

            point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
            graph.push_back(PriorFactorPoint3(symbol('l', 0), points[0], point_noise))

            # Initial guesses for all observed landmarks, again intentionally
            # offset from the ground truth.
            offset = np.array([-0.25, 0.20, 0.15])
            for j, point in enumerate(points):
                initial_estimate.insert(symbol('l', j),
                                        Point3(points[j].vector() + offset))
        else:
            # Fold the staged data into iSAM and report the current estimate.
            isam.update(graph, initial_estimate)
            current_estimate = isam.estimate()
            print('*' * 50)
            print('Frame {}:'.format(i))
            current_estimate.print_('Current estimate: ')

            # Empty the staging containers for the next iteration.
            graph.resize(0)
            initial_estimate.clear()


if __name__ == '__main__':
    main()
||||
|
|
@ -0,0 +1,47 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
ScenarioRunner unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestScenarioRunner(GtsamTestCase):
    """ScenarioRunner unit tests."""

    def setUp(self):
        # Simple gravity constant fed to the preintegration parameters.
        self.g = 10

    def test_loop_runner(self):
        """At t=0 of a constant-twist loop the specific force points up."""
        # Forward velocity 2 m/s, pitching up at 6 deg/s (negative in FLU).
        forward_speed = 2
        pitch_rate = math.radians(6)
        W = np.array([0, -pitch_rate, 0])
        V = np.array([forward_speed, 0, 0])
        scenario = gtsam.ConstantTwistScenario(W, V)

        dt = 0.1
        params = gtsam.PreintegrationParams.MakeSharedU(self.g)
        bias = gtsam.imuBias_ConstantBias()
        runner = gtsam.ScenarioRunner(scenario, params, dt, bias)

        # Centripetal acceleration a = w*v stacks on top of the reaction to
        # gravity along +z.
        t = 0.0
        a = pitch_rate * forward_speed
        np.testing.assert_almost_equal(
            np.array([0, 0, a + self.g]), runner.actualSpecificForce(t))


if __name__ == '__main__':
    unittest.main()
||||
|
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Cal3Unified unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestCal3Unified(GtsamTestCase):
    """Cal3Unified unit tests."""

    def test_Cal3Unified(self):
        """A default-constructed calibration has unit focal lengths."""
        K = gtsam.Cal3Unified()
        self.assertEqual(K.fx(), 1.)
        # BUGFIX: the original duplicated the K.fx() assertion; the second
        # check was clearly meant to cover the other focal length, fy().
        self.assertEqual(K.fy(), 1.)

    def test_retract(self):
        """retract() applies a tangent vector; localCoordinates() inverts it."""
        expected = gtsam.Cal3Unified(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6,
                                     1e-3 + 7, 2.0*1e-3 + 8, 3.0*1e-3 + 9, 4.0*1e-3 + 10, 0.1 + 1)
        K = gtsam.Cal3Unified(100, 105, 0.0, 320, 240,
                              1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0.1)
        d = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 1], order='F')
        actual = K.retract(d)
        self.gtsamAssertEquals(actual, expected)
        # Round trip: localCoordinates(retract(d)) should recover d.
        np.testing.assert_allclose(d, K.localCoordinates(actual))


if __name__ == "__main__":
    unittest.main()
||||
|
|
@ -0,0 +1,92 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
JacobianFactor unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestJacobianFactor(GtsamTestCase):
    """JacobianFactor unit tests."""

    def test_eliminate(self):
        """Eliminating x2 from a combined factor yields the expected Gaussian
        conditional on x2 and the expected reduced factor on l1, x1."""
        # Recommended way to specify a matrix (see cython/README):
        # float entries, Fortran (column-major) storage order.
        Ax2 = np.array(
            [[-5., 0.],
             [+0., -5.],
             [10., 0.],
             [+0., 10.]], order='F')

        # This is good too.
        Al1 = np.array(
            [[5, 0],
             [0, 5],
             [0, 0],
             [0, 0]], dtype=float, order='F')

        # Not recommended for performance reasons, but should still work, as
        # the wrapper converts to the correct type and storage order.
        Ax1 = np.array(
            [[0, 0],     # f4
             [0, 0],     # f4
             [-10, 0],   # f2
             [0, -10]])  # f2

        # Integer keys for the three variables.
        x2 = 1
        l1 = 2
        x1 = 3

        # Right-hand side plus a unit-sigma diagonal noise model.
        b2 = np.array([-1., 1.5, 2., -1.])
        sigmas = np.array([1., 1., 1., 1.])
        model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas)
        combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4)

        # Eliminate the first variable (x2) from the combined factor —
        # destructive! ('ordering' rather than 'ord', which shadows a builtin.)
        ordering = gtsam.Ordering()
        ordering.push_back(x2)
        actualCG, lf = combined.eliminate(ordering)

        # Expected conditional Gaussian on x2 given l1, x1.
        R11 = np.array([[11.1803, 0.00],
                        [0.00, 11.1803]])
        S12 = np.array([[-2.23607, 0.00],
                        [+0.00, -2.23607]])
        S13 = np.array([[-8.94427, 0.00],
                        [+0.00, -8.94427]])
        d = np.array([2.23607, -1.56525])
        expectedCG = gtsam.GaussianConditional(
            x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2))
        self.gtsamAssertEquals(actualCG, expectedCG, 1e-4)

        # Expected reduced linear factor on l1 and x1.
        Bl1 = np.array([[4.47214, 0.00],
                        [0.00, 4.47214]])
        Bx1 = np.array(
            # x1
            [[-4.47214, 0.00],
             [+0.00, -4.47214]])
        b1 = np.array([0.0, 0.894427])
        model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.]))
        expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2)

        # The remaining factor must match the combined (reduced) factor.
        self.gtsamAssertEquals(lf, expectedLF, 1e-4)


if __name__ == "__main__":
    unittest.main()
||||
|
|
@ -0,0 +1,83 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
KalmanFilter unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestKalmanFilter(GtsamTestCase):
    """KalmanFilter unit tests."""

    def test_KalmanFilter(self):
        """Run three predict/update cycles of a 2D filter with identity
        dynamics and compare against hand-propagated means and covariances."""
        # System x' = F x + B u with measurement z = H x; everything identity.
        F = np.eye(2)
        B = np.eye(2)
        u = np.array([1.0, 0.0])
        modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1]))
        Q = 0.01 * np.eye(2)
        H = np.eye(2)
        z1 = np.array([1.0, 0.0])
        z2 = np.array([2.0, 0.0])
        z3 = np.array([3.0, 0.0])
        modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1]))
        R = 0.01 * np.eye(2)

        # Expected outputs propagated by hand: with identity F,
        # P_predict = P + Q and information I = P^-1 accumulates R^-1
        # on each measurement update.
        expected0 = np.array([0.0, 0.0])
        P00 = 0.01 * np.eye(2)

        expected1 = np.array([1.0, 0.0])
        P01 = P00 + Q
        I11 = np.linalg.inv(P01) + np.linalg.inv(R)

        expected2 = np.array([2.0, 0.0])
        P12 = np.linalg.inv(I11) + Q
        I22 = np.linalg.inv(P12) + np.linalg.inv(R)

        expected3 = np.array([3.0, 0.0])
        P23 = np.linalg.inv(I22) + Q
        I33 = np.linalg.inv(P23) + np.linalg.inv(R)  # computed for completeness

        # The filter under test and its initialization point.
        KF = gtsam.KalmanFilter(n=2)
        x_initial = np.array([0.0, 0.0])
        P_initial = 0.01 * np.eye(2)

        state = KF.init(x_initial, P_initial)
        self.assertTrue(np.allclose(expected0, state.mean()))
        self.assertTrue(np.allclose(P00, state.covariance()))

        # Iteration 1: predict, then measurement update.
        state = KF.predict(state, F, B, u, modelQ)
        self.assertTrue(np.allclose(expected1, state.mean()))
        self.assertTrue(np.allclose(P01, state.covariance()))
        state = KF.update(state, H, z1, modelR)
        self.assertTrue(np.allclose(expected1, state.mean()))
        self.assertTrue(np.allclose(I11, state.information()))

        # Iteration 2.
        state = KF.predict(state, F, B, u, modelQ)
        self.assertTrue(np.allclose(expected2, state.mean()))
        state = KF.update(state, H, z2, modelR)
        self.assertTrue(np.allclose(expected2, state.mean()))

        # Iteration 3.
        state = KF.predict(state, F, B, u, modelQ)
        self.assertTrue(np.allclose(expected3, state.mean()))
        state = KF.update(state, H, z3, modelR)
        self.assertTrue(np.allclose(expected3, state.mean()))


if __name__ == "__main__":
    unittest.main()
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Localization unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestLocalizationExample(GtsamTestCase):
    """Localization unit tests."""

    def test_LocalizationExample(self):
        """Pose2 chain with odometry and 'GPS'-like pose priors: the optimum
        should coincide with the ground truth."""
        graph = gtsam.NonlinearFactorGraph()

        # Two odometry factors sharing one measurement:
        # 20 cm std on x,y and 0.1 rad on theta.
        odometry = gtsam.Pose2(2.0, 0.0, 0.0)
        odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.2, 0.2, 0.1]))
        graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise))
        graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))

        # Three "GPS" measurements, expressed as Pose2 priors with a very
        # high variance on theta.
        groundTruth = gtsam.Values()
        groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0))
        groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0))
        groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0))
        model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.]))
        for i in range(3):
            graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model))

        # Noisy starting point for the optimizer.
        initialEstimate = gtsam.Values()
        initialEstimate.insert(0, gtsam.Pose2(0.5, 0.0, 0.2))
        initialEstimate.insert(1, gtsam.Pose2(2.3, 0.1, -0.2))
        initialEstimate.insert(2, gtsam.Pose2(4.1, 0.1, 0.1))

        # Levenberg-Marquardt with an ordering from colamd.
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
        result = optimizer.optimizeSafely()

        # Verify each pose against ground truth and pull its marginal
        # covariance (the covariance ellipse data).
        marginals = gtsam.Marginals(graph, result)
        P = [None] * result.size()
        for i in range(result.size()):
            pose_i = result.atPose2(i)
            self.gtsamAssertEquals(pose_i, groundTruth.atPose2(i), 1e-4)
            P[i] = marginals.marginalCovariance(i)


if __name__ == "__main__":
    unittest.main()
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for IMU testing scenarios.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
|
||||
GaussNewtonParams, LevenbergMarquardtOptimizer,
|
||||
LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
|
||||
Point2, PriorFactorPoint2, Values)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
KEY1 = 1
|
||||
KEY2 = 2
|
||||
|
||||
|
||||
class TestScenario(GtsamTestCase):
    """Smoke tests for the three nonlinear optimizer variants."""

    def test_optimize(self):
        """Do trivial test with three optimizer variants."""
        fg = NonlinearFactorGraph()
        model = gtsam.noiseModel_Unit.Create(2)
        fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))

        # Error at the minimum is exactly zero.
        # BUGFIX: the original called assertEqual(a, b, tol) — the third
        # positional argument of assertEqual is the failure *message*, not a
        # tolerance. Use assertAlmostEqual with an explicit delta instead.
        xstar = Point2(0, 0)
        optimal_values = Values()
        optimal_values.insert(KEY1, xstar)
        self.assertAlmostEqual(0.0, fg.error(optimal_values), delta=1e-9)

        # Error at the initial point x0 = (3, 3) with a unit noise model:
        # 0.5 * (3^2 + 3^2) = 9.
        x0 = Point2(3, 3)
        initial_values = Values()
        initial_values.insert(KEY1, x0)
        self.assertAlmostEqual(9.0, fg.error(initial_values), delta=1e-3)

        # A fixed elimination ordering shared by all three optimizers.
        ordering = Ordering()
        ordering.push_back(KEY1)

        # Gauss-Newton
        gnParams = GaussNewtonParams()
        gnParams.setOrdering(ordering)
        actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual1))

        # Levenberg-Marquardt
        lmParams = LevenbergMarquardtParams.CeresDefaults()
        lmParams.setOrdering(ordering)
        actual2 = LevenbergMarquardtOptimizer(
            fg, initial_values, lmParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual2))

        # Dogleg
        dlParams = DoglegParams()
        dlParams.setOrdering(ordering)
        actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize()
        self.assertAlmostEqual(0, fg.error(actual3))


if __name__ == "__main__":
    unittest.main()
||||
|
|
@ -0,0 +1,59 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Odometry unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestOdometryExample(GtsamTestCase):
|
||||
|
||||
def test_OdometryExample(self):
|
||||
# Create the graph (defined in pose2SLAM.h, derived from
|
||||
# NonlinearFactorGraph)
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add a Gaussian prior on pose x_1
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin
|
||||
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta
|
||||
# add directly to graph
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))
|
||||
|
||||
# Add two odometry factors
|
||||
# create a measurement for both factors (the same in this case)
|
||||
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
|
||||
odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))
|
||||
|
||||
# Initialize to noisy points
|
||||
initialEstimate = gtsam.Values()
|
||||
initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
|
||||
|
||||
# Optimize using Levenberg-Marquardt optimization with an ordering from
|
||||
# colamd
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
result = optimizer.optimizeSafely()
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
marginals.marginalCovariance(1)
|
||||
|
||||
# Check first pose equality
|
||||
pose_1 = result.atPose2(1)
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
PlanarSLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPlanarSLAM(GtsamTestCase):
|
||||
|
||||
def test_PlanarSLAM(self):
|
||||
# Assumptions
|
||||
# - All values are axis aligned
|
||||
# - Robot poses are facing along the X axis (horizontal, to the right in images)
|
||||
# - We have full odometry for measurements
|
||||
# - The robot is on a grid, moving 2 meters each step
|
||||
|
||||
# Create graph container and add factors to it
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add prior
|
||||
# gaussian for prior
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
# add directly to graph
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))
|
||||
|
||||
# Add odometry
|
||||
# general noisemodel for odometry
|
||||
odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
2, 3, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
3, 4, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
|
||||
# Add pose constraint
|
||||
model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model))
|
||||
|
||||
# Initialize to noisy points
|
||||
initialEstimate = gtsam.Values()
|
||||
initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2))
|
||||
initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi))
|
||||
initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2))
|
||||
|
||||
# Optimize using Levenberg-Marquardt optimization with an ordering from
|
||||
# colamd
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
result = optimizer.optimizeSafely()
|
||||
|
||||
# Plot Covariance Ellipses
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
P = marginals.marginalCovariance(1)
|
||||
|
||||
pose_1 = result.atPose2(1)
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose2 unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Pose2
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose2(GtsamTestCase):
|
||||
"""Test selected Pose2 methods."""
|
||||
|
||||
def test_adjoint(self):
|
||||
"""Test adjoint method."""
|
||||
xi = np.array([1, 2, 3])
|
||||
expected = np.dot(Pose2.adjointMap_(xi), xi)
|
||||
actual = Pose2.adjoint_(xi, xi)
|
||||
np.testing.assert_array_equal(actual, expected)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose2SLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose2SLAMExample(GtsamTestCase):
|
||||
|
||||
def test_Pose2SLAMExample(self):
|
||||
# Assumptions
|
||||
# - All values are axis aligned
|
||||
# - Robot poses are facing along the X axis (horizontal, to the right in images)
|
||||
# - We have full odometry for measurements
|
||||
# - The robot is on a grid, moving 2 meters each step
|
||||
|
||||
# Create graph container and add factors to it
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add prior
|
||||
# gaussian for prior
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
# add directly to graph
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))
|
||||
|
||||
# Add odometry
|
||||
# general noisemodel for odometry
|
||||
odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
2, 3, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
3, 4, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(
|
||||
4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise))
|
||||
|
||||
# Add pose constraint
|
||||
model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model))
|
||||
|
||||
# Initialize to noisy points
|
||||
initialEstimate = gtsam.Values()
|
||||
initialEstimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initialEstimate.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initialEstimate.insert(3, gtsam.Pose2(4.1, 0.1, pi / 2))
|
||||
initialEstimate.insert(4, gtsam.Pose2(4.0, 2.0, pi))
|
||||
initialEstimate.insert(5, gtsam.Pose2(2.1, 2.1, -pi / 2))
|
||||
|
||||
# Optimize using Levenberg-Marquardt optimization with an ordering from
|
||||
# colamd
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
result = optimizer.optimizeSafely()
|
||||
|
||||
# Plot Covariance Ellipses
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
P = marginals.marginalCovariance(1)
|
||||
|
||||
pose_1 = result.atPose2(1)
|
||||
self.gtsamAssertEquals(pose_1, gtsam.Pose2(), 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Pose3 unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Point3, Pose3, Rot3
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPose3(GtsamTestCase):
|
||||
"""Test selected Pose3 methods."""
|
||||
|
||||
def test_between(self):
|
||||
"""Test between method."""
|
||||
T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
|
||||
T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
|
||||
expected = T2.inverse().compose(T3)
|
||||
actual = T2.between(T3)
|
||||
self.gtsamAssertEquals(actual, expected, 1e-6)
|
||||
|
||||
def test_transform_to(self):
|
||||
"""Test transformTo method."""
|
||||
transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
|
||||
actual = transform.transformTo(Point3(3, 2, 10))
|
||||
expected = Point3(2, 1, 10)
|
||||
self.gtsamAssertEquals(actual, expected, 1e-6)
|
||||
|
||||
def test_range(self):
|
||||
"""Test range method."""
|
||||
l1 = Point3(1, 0, 0)
|
||||
l2 = Point3(1, 1, 0)
|
||||
x1 = Pose3()
|
||||
|
||||
xl1 = Pose3(Rot3.Ypr(0.0, 0.0, 0.0), Point3(1, 0, 0))
|
||||
xl2 = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
|
||||
|
||||
# establish range is indeed zero
|
||||
self.assertEqual(1, x1.range(point=l1))
|
||||
|
||||
# establish range is indeed sqrt2
|
||||
self.assertEqual(math.sqrt(2.0), x1.range(point=l2))
|
||||
|
||||
# establish range is indeed zero
|
||||
self.assertEqual(1, x1.range(pose=xl1))
|
||||
|
||||
# establish range is indeed sqrt2
|
||||
self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2))
|
||||
|
||||
def test_adjoint(self):
|
||||
"""Test adjoint method."""
|
||||
xi = np.array([1, 2, 3, 4, 5, 6])
|
||||
expected = np.dot(Pose3.adjointMap_(xi), xi)
|
||||
actual = Pose3.adjoint_(xi, xi)
|
||||
np.testing.assert_array_equal(actual, expected)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
PoseSLAM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
from math import pi
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
from gtsam.utils.circlePose3 import *
|
||||
|
||||
|
||||
class TestPose3SLAMExample(GtsamTestCase):
|
||||
|
||||
def test_Pose3SLAMExample(self):
|
||||
# Create a hexagon of poses
|
||||
hexagon = circlePose3(6, 1.0)
|
||||
p0 = hexagon.atPose3(0)
|
||||
p1 = hexagon.atPose3(1)
|
||||
|
||||
# create a Pose graph with one equality constraint and one measurement
|
||||
fg = gtsam.NonlinearFactorGraph()
|
||||
fg.add(gtsam.NonlinearEqualityPose3(0, p0))
|
||||
delta = p0.between(p1)
|
||||
covariance = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. * pi / 180]))
|
||||
fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance))
|
||||
fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance))
|
||||
fg.add(gtsam.BetweenFactorPose3(2, 3, delta, covariance))
|
||||
fg.add(gtsam.BetweenFactorPose3(3, 4, delta, covariance))
|
||||
fg.add(gtsam.BetweenFactorPose3(4, 5, delta, covariance))
|
||||
fg.add(gtsam.BetweenFactorPose3(5, 0, delta, covariance))
|
||||
|
||||
# Create initial config
|
||||
initial = gtsam.Values()
|
||||
s = 0.10
|
||||
initial.insert(0, p0)
|
||||
initial.insert(1, hexagon.atPose3(1).retract(s * np.random.randn(6, 1)))
|
||||
initial.insert(2, hexagon.atPose3(2).retract(s * np.random.randn(6, 1)))
|
||||
initial.insert(3, hexagon.atPose3(3).retract(s * np.random.randn(6, 1)))
|
||||
initial.insert(4, hexagon.atPose3(4).retract(s * np.random.randn(6, 1)))
|
||||
initial.insert(5, hexagon.atPose3(5).retract(s * np.random.randn(6, 1)))
|
||||
|
||||
# optimize
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(fg, initial)
|
||||
result = optimizer.optimizeSafely()
|
||||
|
||||
pose_1 = result.atPose3(1)
|
||||
self.gtsamAssertEquals(pose_1, p1, 1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
PriorFactor unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestPriorFactor(GtsamTestCase):
|
||||
|
||||
def test_PriorFactor(self):
|
||||
values = gtsam.Values()
|
||||
|
||||
key = 5
|
||||
priorPose3 = gtsam.Pose3()
|
||||
model = gtsam.noiseModel_Unit.Create(6)
|
||||
factor = gtsam.PriorFactorPose3(key, priorPose3, model)
|
||||
values.insert(key, priorPose3)
|
||||
self.assertEqual(factor.error(values), 0)
|
||||
|
||||
key = 3
|
||||
priorVector = np.array([0., 0., 0.])
|
||||
model = gtsam.noiseModel_Unit.Create(3)
|
||||
factor = gtsam.PriorFactorVector(key, priorVector, model)
|
||||
values.insert(key, priorVector)
|
||||
self.assertEqual(factor.error(values), 0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
SFM unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.visual_data_generator as generator
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestSFMExample(GtsamTestCase):
|
||||
|
||||
def test_SFMExample(self):
|
||||
options = generator.Options()
|
||||
options.triangle = False
|
||||
options.nrCameras = 10
|
||||
|
||||
[data, truth] = generator.generate_data(options)
|
||||
|
||||
measurementNoiseSigma = 1.0
|
||||
pointNoiseSigma = 0.1
|
||||
poseNoiseSigmas = np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])
|
||||
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add factors for all measurements
|
||||
measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma)
|
||||
for i in range(len(data.Z)):
|
||||
for k in range(len(data.Z[i])):
|
||||
j = data.J[i][k]
|
||||
graph.add(gtsam.GenericProjectionFactorCal3_S2(
|
||||
data.Z[i][k], measurementNoise,
|
||||
symbol(ord('x'), i), symbol(ord('p'), j), data.K))
|
||||
|
||||
posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas)
|
||||
graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0),
|
||||
truth.cameras[0].pose(), posePriorNoise))
|
||||
pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma)
|
||||
graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0),
|
||||
truth.points[0], pointPriorNoise))
|
||||
|
||||
# Initial estimate
|
||||
initialEstimate = gtsam.Values()
|
||||
for i in range(len(truth.cameras)):
|
||||
pose_i = truth.cameras[i].pose()
|
||||
initialEstimate.insert(symbol(ord('x'), i), pose_i)
|
||||
for j in range(len(truth.points)):
|
||||
point_j = truth.points[j]
|
||||
initialEstimate.insert(symbol(ord('p'), j), point_j)
|
||||
|
||||
# Optimization
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
for i in range(5):
|
||||
optimizer.iterate()
|
||||
result = optimizer.values()
|
||||
|
||||
# Marginalization
|
||||
marginals = gtsam.Marginals(graph, result)
|
||||
marginals.marginalCovariance(symbol(ord('p'), 0))
|
||||
marginals.marginalCovariance(symbol(ord('x'), 0))
|
||||
|
||||
# Check optimized results, should be equal to ground truth
|
||||
for i in range(len(truth.cameras)):
|
||||
pose_i = result.atPose3(symbol(ord('x'), i))
|
||||
self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
|
||||
|
||||
for j in range(len(truth.points)):
|
||||
point_j = result.atPoint3(symbol(ord('p'), j))
|
||||
self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Scenario unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
# pylint: disable=invalid-name, E1101
|
||||
|
||||
|
||||
class TestScenario(GtsamTestCase):
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
def test_loop(self):
|
||||
# Forward velocity 2m/s
|
||||
# Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
v = 2
|
||||
w = math.radians(6)
|
||||
W = np.array([0, -w, 0])
|
||||
V = np.array([v, 0, 0])
|
||||
scenario = gtsam.ConstantTwistScenario(W, V)
|
||||
|
||||
T = 30
|
||||
np.testing.assert_almost_equal(W, scenario.omega_b(T))
|
||||
np.testing.assert_almost_equal(V, scenario.velocity_b(T))
|
||||
np.testing.assert_almost_equal(
|
||||
np.cross(W, V), scenario.acceleration_b(T))
|
||||
|
||||
# R = v/w, so test if loop crests at 2*R
|
||||
R = v / w
|
||||
T30 = scenario.pose(T)
|
||||
np.testing.assert_almost_equal(
|
||||
np.array([math.pi, 0, math.pi]), T30.rotation().xyz())
|
||||
self.gtsamAssertEquals(gtsam.Point3(
|
||||
0, 0, 2.0 * R), T30.translation(), 1e-9)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
SimpleCamera unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import math
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
K = Cal3_S2(625, 625, 0, 0, 0)
|
||||
|
||||
class TestSimpleCamera(GtsamTestCase):
|
||||
|
||||
def test_constructor(self):
|
||||
pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5))
|
||||
camera = SimpleCamera(pose1, K)
|
||||
self.gtsamAssertEquals(camera.calibration(), K, 1e-9)
|
||||
self.gtsamAssertEquals(camera.pose(), pose1, 1e-9)
|
||||
|
||||
def test_level2(self):
|
||||
# Create a level camera, looking in Y-direction
|
||||
pose2 = Pose2(0.4,0.3,math.pi/2.0)
|
||||
camera = SimpleCamera.Level(K, pose2, 0.1)
|
||||
|
||||
# expected
|
||||
x = Point3(1,0,0)
|
||||
y = Point3(0,0,-1)
|
||||
z = Point3(0,1,0)
|
||||
wRc = Rot3(x,y,z)
|
||||
expected = Pose3(wRc,Point3(0.4,0.3,0.1))
|
||||
self.gtsamAssertEquals(camera.pose(), expected, 1e-9)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Stereo VO unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestStereoVOExample(GtsamTestCase):
|
||||
|
||||
def test_StereoVOExample(self):
|
||||
## Assumptions
|
||||
# - For simplicity this example is in the camera's coordinate frame
|
||||
# - X: right, Y: down, Z: forward
|
||||
# - Pose x1 is at the origin, Pose 2 is 1 meter forward (along Z-axis)
|
||||
# - x1 is fixed with a constraint, x2 is initialized with noisy values
|
||||
# - No noise on measurements
|
||||
|
||||
## Create keys for variables
|
||||
x1 = symbol(ord('x'),1)
|
||||
x2 = symbol(ord('x'),2)
|
||||
l1 = symbol(ord('l'),1)
|
||||
l2 = symbol(ord('l'),2)
|
||||
l3 = symbol(ord('l'),3)
|
||||
|
||||
## Create graph container and add factors to it
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
## add a constraint on the starting pose
|
||||
first_pose = gtsam.Pose3()
|
||||
graph.add(gtsam.NonlinearEqualityPose3(x1, first_pose))
|
||||
|
||||
## Create realistic calibration and measurement noise model
|
||||
# format: fx fy skew cx cy baseline
|
||||
K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2)
|
||||
stereo_model = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 1.0]))
|
||||
|
||||
## Add measurements
|
||||
# pose 1
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(520, 480, 440), stereo_model, x1, l1, K))
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(120, 80, 440), stereo_model, x1, l2, K))
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(320, 280, 140), stereo_model, x1, l3, K))
|
||||
|
||||
#pose 2
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(570, 520, 490), stereo_model, x2, l1, K))
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2( 70, 20, 490), stereo_model, x2, l2, K))
|
||||
graph.add(gtsam.GenericStereoFactor3D(gtsam.StereoPoint2(320, 270, 115), stereo_model, x2, l3, K))
|
||||
|
||||
## Create initial estimate for camera poses and landmarks
|
||||
initialEstimate = gtsam.Values()
|
||||
initialEstimate.insert(x1, first_pose)
|
||||
# noisy estimate for pose 2
|
||||
initialEstimate.insert(x2, gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(0.1,-.1,1.1)))
|
||||
expected_l1 = gtsam.Point3( 1, 1, 5)
|
||||
initialEstimate.insert(l1, expected_l1)
|
||||
initialEstimate.insert(l2, gtsam.Point3(-1, 1, 5))
|
||||
initialEstimate.insert(l3, gtsam.Point3( 0,-.5, 5))
|
||||
|
||||
## optimize
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate)
|
||||
result = optimizer.optimize()
|
||||
|
||||
## check equality for the first pose and point
|
||||
pose_x1 = result.atPose3(x1)
|
||||
self.gtsamAssertEquals(pose_x1, first_pose,1e-4)
|
||||
|
||||
point_l1 = result.atPoint3(l1)
|
||||
self.gtsamAssertEquals(point_l1, expected_l1,1e-4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Values unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101, E0611
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2,
|
||||
Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values,
|
||||
imuBias_ConstantBias)
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestValues(GtsamTestCase):
|
||||
|
||||
def test_values(self):
|
||||
values = Values()
|
||||
E = EssentialMatrix(Rot3(), Unit3())
|
||||
tol = 1e-9
|
||||
|
||||
values.insert(0, Point2(0, 0))
|
||||
values.insert(1, Point3(0, 0, 0))
|
||||
values.insert(2, Rot2())
|
||||
values.insert(3, Pose2())
|
||||
values.insert(4, Rot3())
|
||||
values.insert(5, Pose3())
|
||||
values.insert(6, Cal3_S2())
|
||||
values.insert(7, Cal3DS2())
|
||||
values.insert(8, Cal3Bundler())
|
||||
values.insert(9, E)
|
||||
values.insert(10, imuBias_ConstantBias())
|
||||
|
||||
# Special cases for Vectors and Matrices
|
||||
# Note that gtsam's Eigen Vectors and Matrices requires double-precision
|
||||
# floating point numbers in column-major (Fortran style) storage order,
|
||||
# whereas by default, numpy.array is in row-major order and the type is
|
||||
# in whatever the number input type is, e.g. np.array([1,2,3])
|
||||
# will have 'int' type.
|
||||
#
|
||||
# The wrapper will automatically fix the type and storage order for you,
|
||||
# but for performance reasons, it's recommended to specify the correct
|
||||
# type and storage order.
|
||||
# for vectors, the order is not important, but dtype still is
|
||||
vec = np.array([1., 2., 3.])
|
||||
values.insert(11, vec)
|
||||
mat = np.array([[1., 2.], [3., 4.]], order='F')
|
||||
values.insert(12, mat)
|
||||
# Test with dtype int and the default order='C'
|
||||
# This still works as the wrapper converts to the correct type and order for you
|
||||
# but is nornally not recommended!
|
||||
mat2 = np.array([[1, 2, ], [3, 5]])
|
||||
values.insert(13, mat2)
|
||||
|
||||
self.gtsamAssertEquals(values.atPoint2(0), Point2(0,0), tol)
|
||||
self.gtsamAssertEquals(values.atPoint3(1), Point3(0,0,0), tol)
|
||||
self.gtsamAssertEquals(values.atRot2(2), Rot2(), tol)
|
||||
self.gtsamAssertEquals(values.atPose2(3), Pose2(), tol)
|
||||
self.gtsamAssertEquals(values.atRot3(4), Rot3(), tol)
|
||||
self.gtsamAssertEquals(values.atPose3(5), Pose3(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3_S2(6), Cal3_S2(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol)
|
||||
self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol)
|
||||
self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol)
|
||||
self.gtsamAssertEquals(values.atimuBias_ConstantBias(
|
||||
10), imuBias_ConstantBias(), tol)
|
||||
|
||||
# special cases for Vector and Matrix:
|
||||
actualVector = values.atVector(11)
|
||||
np.testing.assert_allclose(vec, actualVector, tol)
|
||||
actualMatrix = values.atMatrix(12)
|
||||
np.testing.assert_allclose(mat, actualMatrix, tol)
|
||||
actualMatrix2 = values.atMatrix(13)
|
||||
np.testing.assert_allclose(mat2, actualMatrix2, tol)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
visual_isam unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam.utils.visual_data_generator as generator
|
||||
import gtsam.utils.visual_isam as visual_isam
|
||||
from gtsam import symbol
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestVisualISAMExample(GtsamTestCase):
|
||||
|
||||
def test_VisualISAMExample(self):
|
||||
# Data Options
|
||||
options = generator.Options()
|
||||
options.triangle = False
|
||||
options.nrCameras = 20
|
||||
|
||||
# iSAM Options
|
||||
isamOptions = visual_isam.Options()
|
||||
isamOptions.hardConstraint = False
|
||||
isamOptions.pointPriors = False
|
||||
isamOptions.batchInitialization = True
|
||||
isamOptions.reorderInterval = 10
|
||||
isamOptions.alwaysRelinearize = False
|
||||
|
||||
# Generate data
|
||||
data, truth = generator.generate_data(options)
|
||||
|
||||
# Initialize iSAM with the first pose and points
|
||||
isam, result, nextPose = visual_isam.initialize(data, truth, isamOptions)
|
||||
|
||||
# Main loop for iSAM: stepping through all poses
|
||||
for currentPose in range(nextPose, options.nrCameras):
|
||||
isam, result = visual_isam.step(data, isam, result, truth, currentPose)
|
||||
|
||||
for i in range(len(truth.cameras)):
|
||||
pose_i = result.atPose3(symbol(ord('x'), i))
|
||||
self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5)
|
||||
|
||||
for j in range(len(truth.points)):
|
||||
point_j = result.atPoint3(symbol(ord('l'), j))
|
||||
self.gtsamAssertEquals(point_j, truth.points[j], 1e-5)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for testing dataset access.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module, no-member
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam import BetweenFactorPose3, BetweenFactorPose3s
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestDataset(GtsamTestCase):
|
||||
"""Tests for datasets.h wrapper."""
|
||||
|
||||
def setUp(self):
|
||||
"""Get some common paths."""
|
||||
self.pose3_example_g2o_file = gtsam.findExampleDataFile(
|
||||
"pose3example.txt")
|
||||
|
||||
def test_readG2o3D(self):
|
||||
"""Test reading directly into factor graph."""
|
||||
is3D = True
|
||||
graph, initial = gtsam.readG2o(self.pose3_example_g2o_file, is3D)
|
||||
self.assertEqual(graph.size(), 6)
|
||||
self.assertEqual(initial.size(), 5)
|
||||
|
||||
def test_parse3Dfactors(self):
|
||||
"""Test parsing into data structure."""
|
||||
factors = gtsam.parse3DFactors(self.pose3_example_g2o_file)
|
||||
self.assertEqual(factors.size(), 6)
|
||||
self.assertIsInstance(factors.at(0), BetweenFactorPose3)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for Disjoint Set Forest.
|
||||
Author: Frank Dellaert & Varun Agrawal
|
||||
"""
|
||||
# pylint: disable=invalid-name, no-name-in-module, no-member
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest
|
||||
|
||||
import gtsam
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
class TestDSFMap(GtsamTestCase):
|
||||
"""Tests for DSFMap."""
|
||||
|
||||
def test_all(self):
|
||||
"""Test everything in DFSMap."""
|
||||
def key(index_pair):
|
||||
return index_pair.i(), index_pair.j()
|
||||
|
||||
dsf = gtsam.DSFMapIndexPair()
|
||||
pair1 = gtsam.IndexPair(1, 18)
|
||||
self.assertEqual(key(dsf.find(pair1)), key(pair1))
|
||||
pair2 = gtsam.IndexPair(2, 2)
|
||||
dsf.merge(pair1, pair2)
|
||||
self.assertTrue(dsf.find(pair1), dsf.find(pair1))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Unit tests for 3D SLAM initialization, using rotation relaxation.
|
||||
Author: Luca Carlone and Frank Dellaert (Python)
|
||||
"""
|
||||
# pylint: disable=invalid-name, E1101, E0611
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
from gtsam import NonlinearFactorGraph, Point3, Pose3, Rot3, Values
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
x0, x1, x2, x3 = 0, 1, 2, 3
|
||||
|
||||
|
||||
class TestValues(GtsamTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
|
||||
|
||||
# We consider a small graph:
|
||||
# symbolic FG
|
||||
# x2 0 1
|
||||
# / | \ 1 2
|
||||
# / | \ 2 3
|
||||
# x3 | x1 2 0
|
||||
# \ | / 0 3
|
||||
# \ | /
|
||||
# x0
|
||||
#
|
||||
p0 = Point3(0, 0, 0)
|
||||
self.R0 = Rot3.Expmap(np.array([0.0, 0.0, 0.0]))
|
||||
p1 = Point3(1, 2, 0)
|
||||
self.R1 = Rot3.Expmap(np.array([0.0, 0.0, 1.570796]))
|
||||
p2 = Point3(0, 2, 0)
|
||||
self.R2 = Rot3.Expmap(np.array([0.0, 0.0, 3.141593]))
|
||||
p3 = Point3(-1, 1, 0)
|
||||
self.R3 = Rot3.Expmap(np.array([0.0, 0.0, 4.712389]))
|
||||
|
||||
pose0 = Pose3(self.R0, p0)
|
||||
pose1 = Pose3(self.R1, p1)
|
||||
pose2 = Pose3(self.R2, p2)
|
||||
pose3 = Pose3(self.R3, p3)
|
||||
|
||||
g = NonlinearFactorGraph()
|
||||
g.add(gtsam.BetweenFactorPose3(x0, x1, pose0.between(pose1), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x1, x2, pose1.between(pose2), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x2, x3, pose2.between(pose3), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x2, x0, pose2.between(pose0), model))
|
||||
g.add(gtsam.BetweenFactorPose3(x0, x3, pose0.between(pose3), model))
|
||||
g.add(gtsam.PriorFactorPose3(x0, pose0, model))
|
||||
self.graph = g
|
||||
|
||||
def test_buildPose3graph(self):
|
||||
pose3graph = gtsam.InitializePose3.buildPose3graph(self.graph)
|
||||
|
||||
def test_orientations(self):
|
||||
pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph)
|
||||
|
||||
initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph)
|
||||
|
||||
# comparison is up to M_PI, that's why we add some multiples of 2*M_PI
|
||||
self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x2), self.R2, 1e-6)
|
||||
self.gtsamAssertEquals(initial.atRot3(x3), self.R3, 1e-6)
|
||||
|
||||
def test_initializePoses(self):
|
||||
g2oFile = gtsam.findExampleDataFile("pose3example-grid")
|
||||
is3D = True
|
||||
inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D)
|
||||
priorModel = gtsam.noiseModel_Unit.Create(6)
|
||||
inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel))
|
||||
|
||||
initial = gtsam.InitializePose3.initialize(inputGraph)
|
||||
# TODO(frank): very loose !!
|
||||
self.gtsamAssertEquals(initial, expectedValues, 0.1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
import gtsam
|
||||
import numpy as np
|
||||
from math import pi, cos, sin
|
||||
|
||||
|
||||
def circlePose3(numPoses=8, radius=1.0, symbolChar=0):
|
||||
"""
|
||||
circlePose3 generates a set of poses in a circle. This function
|
||||
returns those poses inside a gtsam.Values object, with sequential
|
||||
keys starting from 0. An optional character may be provided, which
|
||||
will be stored in the msb of each key (i.e. gtsam.Symbol).
|
||||
|
||||
We use aerospace/navlab convention, X forward, Y right, Z down
|
||||
First pose will be at (R,0,0)
|
||||
^y ^ X
|
||||
| |
|
||||
z-->xZ--> Y (z pointing towards viewer, Z pointing away from viewer)
|
||||
Vehicle at p0 is looking towards y axis (X-axis points towards world y)
|
||||
"""
|
||||
|
||||
# Force symbolChar to be a single character
|
||||
if type(symbolChar) is str:
|
||||
symbolChar = ord(symbolChar[0])
|
||||
|
||||
values = gtsam.Values()
|
||||
theta = 0.0
|
||||
dtheta = 2 * pi / numPoses
|
||||
gRo = gtsam.Rot3(
|
||||
np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F'))
|
||||
for i in range(numPoses):
|
||||
key = gtsam.symbol(symbolChar, i)
|
||||
gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0)
|
||||
oRi = gtsam.Rot3.Yaw(
|
||||
-theta) # negative yaw goes counterclockwise, with Z down !
|
||||
gTi = gtsam.Pose3(gRo.compose(oRi), gti)
|
||||
values.insert(key, gTi)
|
||||
theta = theta + dtheta
|
||||
return values
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
"""Various plotting utlities."""
|
||||
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib import patches
|
||||
|
||||
|
||||
def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
|
||||
"""Plot a 2D pose on given axis 'axes' with given 'axis_length'."""
|
||||
# get rotation and translation (center)
|
||||
gRp = pose.rotation().matrix() # rotation from pose to global
|
||||
t = pose.translation()
|
||||
origin = np.array([t.x(), t.y()])
|
||||
|
||||
# draw the camera axes
|
||||
x_axis = origin + gRp[:, 0] * axis_length
|
||||
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], 'r-')
|
||||
|
||||
y_axis = origin + gRp[:, 1] * axis_length
|
||||
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], 'g-')
|
||||
|
||||
if covariance is not None:
|
||||
pPp = covariance[0:2, 0:2]
|
||||
gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
|
||||
|
||||
w, v = np.linalg.eig(gPp)
|
||||
|
||||
# k = 2.296
|
||||
k = 5.0
|
||||
|
||||
angle = np.arctan2(v[1, 0], v[0, 0])
|
||||
e1 = patches.Ellipse(origin, np.sqrt(w[0]*k), np.sqrt(w[1]*k),
|
||||
np.rad2deg(angle), fill=False)
|
||||
axes.add_patch(e1)
|
||||
|
||||
def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
|
||||
"""Plot a 2D pose on given figure with given 'axis_length'."""
|
||||
# get figure object
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca()
|
||||
plot_pose2_on_axes(axes, pose, axis_length, covariance)
|
||||
|
||||
|
||||
def plot_point3_on_axes(axes, point, linespec):
|
||||
"""Plot a 3D point on given axis 'axes' with given 'linespec'."""
|
||||
axes.plot([point.x()], [point.y()], [point.z()], linespec)
|
||||
|
||||
|
||||
def plot_point3(fignum, point, linespec):
|
||||
"""Plot a 3D point on given figure with given 'linespec'."""
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca(projection='3d')
|
||||
plot_point3_on_axes(axes, point, linespec)
|
||||
|
||||
|
||||
def plot_3d_points(fignum, values, linespec, marginals=None):
|
||||
"""
|
||||
Plots the Point3s in 'values', with optional covariances.
|
||||
Finds all the Point3 objects in the given Values object and plots them.
|
||||
If a Marginals object is given, this function will also plot marginal
|
||||
covariance ellipses for each point.
|
||||
"""
|
||||
|
||||
keys = values.keys()
|
||||
|
||||
# Plot points and covariance matrices
|
||||
for i in range(keys.size()):
|
||||
try:
|
||||
p = values.atPoint3(keys.at(i))
|
||||
# if haveMarginals
|
||||
# P = marginals.marginalCovariance(key);
|
||||
# gtsam.plot_point3(p, linespec, P);
|
||||
# else
|
||||
plot_point3(fignum, p, linespec)
|
||||
except RuntimeError:
|
||||
continue
|
||||
# I guess it's not a Point3
|
||||
|
||||
|
||||
def plot_pose3_on_axes(axes, pose, axis_length=0.1):
|
||||
"""Plot a 3D pose on given axis 'axes' with given 'axis_length'."""
|
||||
# get rotation and translation (center)
|
||||
gRp = pose.rotation().matrix() # rotation from pose to global
|
||||
t = pose.translation()
|
||||
origin = np.array([t.x(), t.y(), t.z()])
|
||||
|
||||
# draw the camera axes
|
||||
x_axis = origin + gRp[:, 0] * axis_length
|
||||
line = np.append(origin[np.newaxis], x_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'r-')
|
||||
|
||||
y_axis = origin + gRp[:, 1] * axis_length
|
||||
line = np.append(origin[np.newaxis], y_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'g-')
|
||||
|
||||
z_axis = origin + gRp[:, 2] * axis_length
|
||||
line = np.append(origin[np.newaxis], z_axis[np.newaxis], axis=0)
|
||||
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-')
|
||||
|
||||
# plot the covariance
|
||||
# TODO (dellaert): make this work
|
||||
# if (nargin>2) && (~isempty(P))
|
||||
# pPp = P(4:6,4:6); % covariance matrix in pose coordinate frame
|
||||
# gPp = gRp*pPp*gRp'; % convert the covariance matrix to global coordinate frame
|
||||
# gtsam.covarianceEllipse3D(origin,gPp);
|
||||
# end
|
||||
|
||||
|
||||
def plot_pose3(fignum, pose, axis_length=0.1):
|
||||
"""Plot a 3D pose on given figure with given 'axis_length'."""
|
||||
# get figure object
|
||||
fig = plt.figure(fignum)
|
||||
axes = fig.gca(projection='3d')
|
||||
plot_pose3_on_axes(axes, pose, axis_length)
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
TestCase class with GTSAM assert utils.
|
||||
Author: Frank Dellaert
|
||||
"""
|
||||
import unittest
|
||||
|
||||
|
||||
class GtsamTestCase(unittest.TestCase):
|
||||
"""TestCase class with GTSAM assert utils."""
|
||||
|
||||
def gtsamAssertEquals(self, actual, expected, tol=1e-9):
|
||||
""" AssertEqual function that prints out actual and expected if not equal.
|
||||
Usage:
|
||||
self.gtsamAssertEqual(actual,expected)
|
||||
Keyword Arguments:
|
||||
tol {float} -- tolerance passed to 'equals', default 1e-9
|
||||
"""
|
||||
equal = actual.equals(expected, tol)
|
||||
if not equal:
|
||||
raise self.failureException(
|
||||
"Values are not equal:\n{}!={}".format(actual, expected))
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
from math import pi, cos, sin
|
||||
import gtsam
|
||||
|
||||
|
||||
class Options:
|
||||
"""
|
||||
Options to generate test scenario
|
||||
"""
|
||||
|
||||
def __init__(self, triangle=False, nrCameras=3, K=gtsam.Cal3_S2()):
|
||||
"""
|
||||
Options to generate test scenario
|
||||
@param triangle: generate a triangle scene with 3 points if True, otherwise
|
||||
a cube with 8 points
|
||||
@param nrCameras: number of cameras to generate
|
||||
@param K: camera calibration object
|
||||
"""
|
||||
self.triangle = triangle
|
||||
self.nrCameras = nrCameras
|
||||
|
||||
|
||||
class GroundTruth:
|
||||
"""
|
||||
Object holding generated ground-truth data
|
||||
"""
|
||||
|
||||
def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4):
|
||||
self.K = K
|
||||
self.cameras = [gtsam.Pose3()] * nrCameras
|
||||
self.points = [gtsam.Point3()] * nrPoints
|
||||
|
||||
def print_(self, s=""):
|
||||
print(s)
|
||||
print("K = ", self.K)
|
||||
print("Cameras: ", len(self.cameras))
|
||||
for camera in self.cameras:
|
||||
print("\t", camera)
|
||||
print("Points: ", len(self.points))
|
||||
for point in self.points:
|
||||
print("\t", point)
|
||||
pass
|
||||
|
||||
|
||||
class Data:
|
||||
"""
|
||||
Object holding generated measurement data
|
||||
"""
|
||||
|
||||
class NoiseModels:
|
||||
pass
|
||||
|
||||
def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4):
|
||||
self.K = K
|
||||
self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras]
|
||||
self.J = [x[:] for x in [[0] * nrPoints] * nrCameras]
|
||||
self.odometry = [gtsam.Pose3()] * nrCameras
|
||||
|
||||
# Set Noise parameters
|
||||
self.noiseModels = Data.NoiseModels()
|
||||
self.noiseModels.posePrior = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]))
|
||||
# noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
# np.array([0.001,0.001,0.001,0.1,0.1,0.1]))
|
||||
self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas(
|
||||
np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2]))
|
||||
self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
|
||||
self.noiseModels.measurement = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)
|
||||
|
||||
|
||||
def generate_data(options):
|
||||
""" Generate ground-truth and measurement data. """
|
||||
|
||||
K = gtsam.Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.)
|
||||
nrPoints = 3 if options.triangle else 8
|
||||
|
||||
truth = GroundTruth(K=K, nrCameras=options.nrCameras, nrPoints=nrPoints)
|
||||
data = Data(K, nrCameras=options.nrCameras, nrPoints=nrPoints)
|
||||
|
||||
# Generate simulated data
|
||||
if options.triangle: # Create a triangle target, just 3 points on a plane
|
||||
r = 10
|
||||
for j in range(len(truth.points)):
|
||||
theta = j * 2 * pi / nrPoints
|
||||
truth.points[j] = gtsam.Point3(r * cos(theta), r * sin(theta), 0)
|
||||
else: # 3D landmarks as vertices of a cube
|
||||
truth.points = [
|
||||
gtsam.Point3(10, 10, 10), gtsam.Point3(-10, 10, 10),
|
||||
gtsam.Point3(-10, -10, 10), gtsam.Point3(10, -10, 10),
|
||||
gtsam.Point3(10, 10, -10), gtsam.Point3(-10, 10, -10),
|
||||
gtsam.Point3(-10, -10, -10), gtsam.Point3(10, -10, -10)
|
||||
]
|
||||
|
||||
# Create camera cameras on a circle around the triangle
|
||||
height = 10
|
||||
r = 40
|
||||
for i in range(options.nrCameras):
|
||||
theta = i * 2 * pi / options.nrCameras
|
||||
t = gtsam.Point3(r * cos(theta), r * sin(theta), height)
|
||||
truth.cameras[i] = gtsam.SimpleCamera.Lookat(t,
|
||||
gtsam.Point3(),
|
||||
gtsam.Point3(0, 0, 1),
|
||||
truth.K)
|
||||
# Create measurements
|
||||
for j in range(nrPoints):
|
||||
# All landmarks seen in every frame
|
||||
data.Z[i][j] = truth.cameras[i].project(truth.points[j])
|
||||
data.J[i][j] = j
|
||||
|
||||
# Calculate odometry between cameras
|
||||
for i in range(1, options.nrCameras):
|
||||
data.odometry[i] = truth.cameras[i - 1].pose().between(
|
||||
truth.cameras[i].pose())
|
||||
|
||||
return data, truth
|
||||
|
|
@ -0,0 +1,131 @@
|
|||
import gtsam
|
||||
from gtsam import symbol
|
||||
|
||||
|
||||
class Options:
|
||||
""" Options for visual isam example. """
|
||||
|
||||
def __init__(self):
|
||||
self.hardConstraint = False
|
||||
self.pointPriors = False
|
||||
self.batchInitialization = True
|
||||
self.reorderInterval = 10
|
||||
self.alwaysRelinearize = False
|
||||
|
||||
|
||||
def initialize(data, truth, options):
|
||||
# Initialize iSAM
|
||||
params = gtsam.ISAM2Params()
|
||||
if options.alwaysRelinearize:
|
||||
params.setRelinearizeSkip(1)
|
||||
isam = gtsam.ISAM2(params=params)
|
||||
|
||||
# Add constraints/priors
|
||||
# TODO: should not be from ground truth!
|
||||
newFactors = gtsam.NonlinearFactorGraph()
|
||||
initialEstimates = gtsam.Values()
|
||||
for i in range(2):
|
||||
ii = symbol(ord('x'), i)
|
||||
if i == 0:
|
||||
if options.hardConstraint: # add hard constraint
|
||||
newFactors.add(
|
||||
gtsam.NonlinearEqualityPose3(ii, truth.cameras[0].pose()))
|
||||
else:
|
||||
newFactors.add(
|
||||
gtsam.PriorFactorPose3(ii, truth.cameras[i].pose(),
|
||||
data.noiseModels.posePrior))
|
||||
initialEstimates.insert(ii, truth.cameras[i].pose())
|
||||
|
||||
nextPoseIndex = 2
|
||||
|
||||
# Add visual measurement factors from two first poses and initialize
|
||||
# observed landmarks
|
||||
for i in range(2):
|
||||
ii = symbol(ord('x'), i)
|
||||
for k in range(len(data.Z[i])):
|
||||
j = data.J[i][k]
|
||||
jj = symbol(ord('l'), j)
|
||||
newFactors.add(
|
||||
gtsam.GenericProjectionFactorCal3_S2(data.Z[i][
|
||||
k], data.noiseModels.measurement, ii, jj, data.K))
|
||||
# TODO: initial estimates should not be from ground truth!
|
||||
if not initialEstimates.exists(jj):
|
||||
initialEstimates.insert(jj, truth.points[j])
|
||||
if options.pointPriors: # add point priors
|
||||
newFactors.add(
|
||||
gtsam.PriorFactorPoint3(jj, truth.points[j],
|
||||
data.noiseModels.pointPrior))
|
||||
|
||||
# Add odometry between frames 0 and 1
|
||||
newFactors.add(
|
||||
gtsam.BetweenFactorPose3(
|
||||
symbol(ord('x'), 0),
|
||||
symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry))
|
||||
|
||||
# Update ISAM
|
||||
if options.batchInitialization: # Do a full optimize for first two poses
|
||||
batchOptimizer = gtsam.LevenbergMarquardtOptimizer(newFactors,
|
||||
initialEstimates)
|
||||
fullyOptimized = batchOptimizer.optimize()
|
||||
isam.update(newFactors, fullyOptimized)
|
||||
else:
|
||||
isam.update(newFactors, initialEstimates)
|
||||
|
||||
# figure(1)tic
|
||||
# t=toc plot(frame_i,t,'r.') tic
|
||||
result = isam.calculateEstimate()
|
||||
# t=toc plot(frame_i,t,'g.')
|
||||
|
||||
return isam, result, nextPoseIndex
|
||||
|
||||
|
||||
def step(data, isam, result, truth, currPoseIndex):
|
||||
'''
|
||||
Do one step isam update
|
||||
@param[in] data: measurement data (odometry and visual measurements and their noiseModels)
|
||||
@param[in/out] isam: current isam object, will be updated
|
||||
@param[in/out] result: current result object, will be updated
|
||||
@param[in] truth: ground truth data, used to initialize new variables
|
||||
@param[currPoseIndex]: index of the current pose
|
||||
'''
|
||||
# iSAM expects us to give it a new set of factors
|
||||
# along with initial estimates for any new variables introduced.
|
||||
newFactors = gtsam.NonlinearFactorGraph()
|
||||
initialEstimates = gtsam.Values()
|
||||
|
||||
# Add odometry
|
||||
prevPoseIndex = currPoseIndex - 1
|
||||
odometry = data.odometry[prevPoseIndex]
|
||||
newFactors.add(
|
||||
gtsam.BetweenFactorPose3(
|
||||
symbol(ord('x'), prevPoseIndex),
|
||||
symbol(ord('x'), currPoseIndex), odometry,
|
||||
data.noiseModels.odometry))
|
||||
|
||||
# Add visual measurement factors and initializations as necessary
|
||||
for k in range(len(data.Z[currPoseIndex])):
|
||||
zij = data.Z[currPoseIndex][k]
|
||||
j = data.J[currPoseIndex][k]
|
||||
jj = symbol(ord('l'), j)
|
||||
newFactors.add(
|
||||
gtsam.GenericProjectionFactorCal3_S2(
|
||||
zij, data.noiseModels.measurement,
|
||||
symbol(ord('x'), currPoseIndex), jj, data.K))
|
||||
# TODO: initialize with something other than truth
|
||||
if not result.exists(jj) and not initialEstimates.exists(jj):
|
||||
lmInit = truth.points[j]
|
||||
initialEstimates.insert(jj, lmInit)
|
||||
|
||||
# Initial estimates for the new pose.
|
||||
prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex))
|
||||
initialEstimates.insert(
|
||||
symbol(ord('x'), currPoseIndex), prevPose.compose(odometry))
|
||||
|
||||
# Update ISAM
|
||||
# figure(1)tic
|
||||
isam.update(newFactors, initialEstimates)
|
||||
# t=toc plot(frame_i,t,'r.') tic
|
||||
newResult = isam.calculateEstimate()
|
||||
# t=toc plot(frame_i,t,'g.')
|
||||
|
||||
return isam, newResult
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
include(GtsamCythonWrap)
|
||||
|
||||
# Copy eigency's sources to the build folder
|
||||
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
|
||||
# and eigency's cython pxd headers can be found when cythonizing gtsam
|
||||
file(COPY "." DESTINATION ".")
|
||||
set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
|
||||
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
|
||||
|
||||
# This is to make the build/cython/gtsam_eigency folder a python package
|
||||
configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
|
||||
|
||||
# include eigency headers
|
||||
include_directories(${EIGENCY_INCLUDE_DIR})
|
||||
|
||||
# Cythonize and build eigency
|
||||
message(STATUS "Cythonize and build eigency")
|
||||
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
|
||||
# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
|
||||
# in conversions_api.h correctly!!!
|
||||
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
|
||||
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
|
||||
${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "")
|
||||
|
||||
# Include Eigen headers:
|
||||
target_include_directories(cythonize_eigency_conversions PUBLIC
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
target_include_directories(cythonize_eigency_core PUBLIC
|
||||
$<BUILD_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_BUILD}>
|
||||
$<INSTALL_INTERFACE:${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}>
|
||||
)
|
||||
|
||||
add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
|
||||
add_custom_target(cythonize_eigency)
|
||||
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
|
||||
|
||||
# install
|
||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}
|
||||
PATTERN "CMakeLists.txt" EXCLUDE
|
||||
PATTERN "__init__.py.in" EXCLUDE)
|
||||
install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
|
||||
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
|
||||
install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
|
||||
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
|
||||
install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
Copyright (c) 2016 Wouter Boomsma
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
import os
|
||||
import numpy as np
|
||||
|
||||
__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}"
|
||||
|
||||
def get_includes(include_eigen=True):
|
||||
root = os.path.dirname(__file__)
|
||||
parent = os.path.join(root, "..")
|
||||
path = [root, parent, np.get_include()]
|
||||
if include_eigen:
|
||||
path.append(os.path.join(root, __eigen_dir__))
|
||||
return path
|
||||
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
cimport numpy as np
|
||||
|
||||
cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
|
||||
|
||||
|
|
@ -0,0 +1,327 @@
|
|||
cimport cython
|
||||
import numpy as np
|
||||
from numpy.lib.stride_tricks import as_strided
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef double[:,:] mem_view = <double[:rows,:cols]>data
|
||||
dtype = 'double'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data
|
||||
dtype = 'double'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef double[:,:] mem_view = <double[:rows,:cols]>data
|
||||
dtype = 'double'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef double[::1,:] mem_view = <double[:rows:1,:cols]>data
|
||||
dtype = 'double'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef float[:,:] mem_view = <float[:rows,:cols]>data
|
||||
dtype = 'float'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data
|
||||
dtype = 'float'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef float[:,:] mem_view = <float[:rows,:cols]>data
|
||||
dtype = 'float'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef float[::1,:] mem_view = <float[:rows:1,:cols]>data
|
||||
dtype = 'float'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef long[:,:] mem_view = <long[:rows,:cols]>data
|
||||
dtype = 'int_'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data
|
||||
dtype = 'int_'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef long[:,:] mem_view = <long[:rows,:cols]>data
|
||||
dtype = 'int_'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef long[::1,:] mem_view = <long[:rows:1,:cols]>data
|
||||
dtype = 'int_'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned long[:,:] mem_view = <unsigned long[:rows,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned long[::1,:] mem_view = <unsigned long[:rows:1,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef int[:,:] mem_view = <int[:rows,:cols]>data
|
||||
dtype = 'int'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data
|
||||
dtype = 'int'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef int[:,:] mem_view = <int[:rows,:cols]>data
|
||||
dtype = 'int'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef int[::1,:] mem_view = <int[:rows:1,:cols]>data
|
||||
dtype = 'int'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned int[:,:] mem_view = <unsigned int[:rows,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned int[::1,:] mem_view = <unsigned int[:rows:1,:cols]>data
|
||||
dtype = 'uint'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef short[:,:] mem_view = <short[:rows,:cols]>data
|
||||
dtype = 'short'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data
|
||||
dtype = 'short'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef short[:,:] mem_view = <short[:rows,:cols]>data
|
||||
dtype = 'short'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef short[::1,:] mem_view = <short[:rows:1,:cols]>data
|
||||
dtype = 'short'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data
|
||||
dtype = 'ushort'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data
|
||||
dtype = 'ushort'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned short[:,:] mem_view = <unsigned short[:rows,:cols]>data
|
||||
dtype = 'ushort'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned short[::1,:] mem_view = <unsigned short[:rows:1,:cols]>data
|
||||
dtype = 'ushort'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data
|
||||
dtype = 'int8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data
|
||||
dtype = 'int8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef signed char[:,:] mem_view = <signed char[:rows,:cols]>data
|
||||
dtype = 'int8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef signed char[::1,:] mem_view = <signed char[:rows:1,:cols]>data
|
||||
dtype = 'int8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data
|
||||
dtype = 'uint8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data
|
||||
dtype = 'uint8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned char[:,:] mem_view = <unsigned char[:rows,:cols]>data
|
||||
dtype = 'uint8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef unsigned char[::1,:] mem_view = <unsigned char[:rows:1,:cols]>data
|
||||
dtype = 'uint8'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data
|
||||
dtype = 'complex128'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data
|
||||
dtype = 'complex128'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex128_t[:,:] mem_view = <np.complex128_t[:rows,:cols]>data
|
||||
dtype = 'complex128'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex128_t[::1,:] mem_view = <np.complex128_t[:rows:1,:cols]>data
|
||||
dtype = 'complex128'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data
|
||||
dtype = 'complex64'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data
|
||||
dtype = 'complex64'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
|
||||
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex64_t[:,:] mem_view = <np.complex64_t[:rows,:cols]>data
|
||||
dtype = 'complex64'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
@cython.boundscheck(False)
|
||||
cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
|
||||
cdef np.complex64_t[::1,:] mem_view = <np.complex64_t[:rows:1,:cols]>data
|
||||
dtype = 'complex64'
|
||||
cdef int itemsize = np.dtype(dtype).itemsize
|
||||
return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
|
||||
|
||||
|
|
@ -0,0 +1,917 @@
|
|||
cimport cython
|
||||
cimport numpy as np
|
||||
|
||||
ctypedef signed char schar;
|
||||
ctypedef unsigned char uchar;
|
||||
|
||||
ctypedef fused dtype:
|
||||
uchar
|
||||
schar
|
||||
short
|
||||
int
|
||||
long
|
||||
float
|
||||
double
|
||||
|
||||
ctypedef fused DenseType:
|
||||
Matrix
|
||||
Array
|
||||
|
||||
ctypedef fused Rows:
|
||||
_1
|
||||
_2
|
||||
_3
|
||||
_4
|
||||
_5
|
||||
_6
|
||||
_7
|
||||
_8
|
||||
_9
|
||||
_10
|
||||
_11
|
||||
_12
|
||||
_13
|
||||
_14
|
||||
_15
|
||||
_16
|
||||
_17
|
||||
_18
|
||||
_19
|
||||
_20
|
||||
_21
|
||||
_22
|
||||
_23
|
||||
_24
|
||||
_25
|
||||
_26
|
||||
_27
|
||||
_28
|
||||
_29
|
||||
_30
|
||||
_31
|
||||
_32
|
||||
Dynamic
|
||||
|
||||
ctypedef Rows Cols
|
||||
ctypedef Rows StrideOuter
|
||||
ctypedef Rows StrideInner
|
||||
|
||||
ctypedef fused DenseTypeShort:
|
||||
Vector1i
|
||||
Vector2i
|
||||
Vector3i
|
||||
Vector4i
|
||||
VectorXi
|
||||
RowVector1i
|
||||
RowVector2i
|
||||
RowVector3i
|
||||
RowVector4i
|
||||
RowVectorXi
|
||||
Matrix1i
|
||||
Matrix2i
|
||||
Matrix3i
|
||||
Matrix4i
|
||||
MatrixXi
|
||||
Vector1f
|
||||
Vector2f
|
||||
Vector3f
|
||||
Vector4f
|
||||
VectorXf
|
||||
RowVector1f
|
||||
RowVector2f
|
||||
RowVector3f
|
||||
RowVector4f
|
||||
RowVectorXf
|
||||
Matrix1f
|
||||
Matrix2f
|
||||
Matrix3f
|
||||
Matrix4f
|
||||
MatrixXf
|
||||
Vector1d
|
||||
Vector2d
|
||||
Vector3d
|
||||
Vector4d
|
||||
VectorXd
|
||||
RowVector1d
|
||||
RowVector2d
|
||||
RowVector3d
|
||||
RowVector4d
|
||||
RowVectorXd
|
||||
Matrix1d
|
||||
Matrix2d
|
||||
Matrix3d
|
||||
Matrix4d
|
||||
MatrixXd
|
||||
Vector1cf
|
||||
Vector2cf
|
||||
Vector3cf
|
||||
Vector4cf
|
||||
VectorXcf
|
||||
RowVector1cf
|
||||
RowVector2cf
|
||||
RowVector3cf
|
||||
RowVector4cf
|
||||
RowVectorXcf
|
||||
Matrix1cf
|
||||
Matrix2cf
|
||||
Matrix3cf
|
||||
Matrix4cf
|
||||
MatrixXcf
|
||||
Vector1cd
|
||||
Vector2cd
|
||||
Vector3cd
|
||||
Vector4cd
|
||||
VectorXcd
|
||||
RowVector1cd
|
||||
RowVector2cd
|
||||
RowVector3cd
|
||||
RowVector4cd
|
||||
RowVectorXcd
|
||||
Matrix1cd
|
||||
Matrix2cd
|
||||
Matrix3cd
|
||||
Matrix4cd
|
||||
MatrixXcd
|
||||
Array22i
|
||||
Array23i
|
||||
Array24i
|
||||
Array2Xi
|
||||
Array32i
|
||||
Array33i
|
||||
Array34i
|
||||
Array3Xi
|
||||
Array42i
|
||||
Array43i
|
||||
Array44i
|
||||
Array4Xi
|
||||
ArrayX2i
|
||||
ArrayX3i
|
||||
ArrayX4i
|
||||
ArrayXXi
|
||||
Array2i
|
||||
Array3i
|
||||
Array4i
|
||||
ArrayXi
|
||||
Array22f
|
||||
Array23f
|
||||
Array24f
|
||||
Array2Xf
|
||||
Array32f
|
||||
Array33f
|
||||
Array34f
|
||||
Array3Xf
|
||||
Array42f
|
||||
Array43f
|
||||
Array44f
|
||||
Array4Xf
|
||||
ArrayX2f
|
||||
ArrayX3f
|
||||
ArrayX4f
|
||||
ArrayXXf
|
||||
Array2f
|
||||
Array3f
|
||||
Array4f
|
||||
ArrayXf
|
||||
Array22d
|
||||
Array23d
|
||||
Array24d
|
||||
Array2Xd
|
||||
Array32d
|
||||
Array33d
|
||||
Array34d
|
||||
Array3Xd
|
||||
Array42d
|
||||
Array43d
|
||||
Array44d
|
||||
Array4Xd
|
||||
ArrayX2d
|
||||
ArrayX3d
|
||||
ArrayX4d
|
||||
ArrayXXd
|
||||
Array2d
|
||||
Array3d
|
||||
Array4d
|
||||
ArrayXd
|
||||
Array22cf
|
||||
Array23cf
|
||||
Array24cf
|
||||
Array2Xcf
|
||||
Array32cf
|
||||
Array33cf
|
||||
Array34cf
|
||||
Array3Xcf
|
||||
Array42cf
|
||||
Array43cf
|
||||
Array44cf
|
||||
Array4Xcf
|
||||
ArrayX2cf
|
||||
ArrayX3cf
|
||||
ArrayX4cf
|
||||
ArrayXXcf
|
||||
Array2cf
|
||||
Array3cf
|
||||
Array4cf
|
||||
ArrayXcf
|
||||
Array22cd
|
||||
Array23cd
|
||||
Array24cd
|
||||
Array2Xcd
|
||||
Array32cd
|
||||
Array33cd
|
||||
Array34cd
|
||||
Array3Xcd
|
||||
Array42cd
|
||||
Array43cd
|
||||
Array44cd
|
||||
Array4Xcd
|
||||
ArrayX2cd
|
||||
ArrayX3cd
|
||||
ArrayX4cd
|
||||
ArrayXXcd
|
||||
Array2cd
|
||||
Array3cd
|
||||
Array4cd
|
||||
ArrayXcd
|
||||
|
||||
ctypedef fused StorageOrder:
|
||||
RowMajor
|
||||
ColMajor
|
||||
|
||||
ctypedef fused MapOptions:
|
||||
Aligned
|
||||
Unaligned
|
||||
|
||||
cdef extern from "eigency_cpp.h" namespace "eigency":
|
||||
|
||||
cdef cppclass _1 "1":
|
||||
pass
|
||||
|
||||
cdef cppclass _2 "2":
|
||||
pass
|
||||
|
||||
cdef cppclass _3 "3":
|
||||
pass
|
||||
|
||||
cdef cppclass _4 "4":
|
||||
pass
|
||||
|
||||
cdef cppclass _5 "5":
|
||||
pass
|
||||
|
||||
cdef cppclass _6 "6":
|
||||
pass
|
||||
|
||||
cdef cppclass _7 "7":
|
||||
pass
|
||||
|
||||
cdef cppclass _8 "8":
|
||||
pass
|
||||
|
||||
cdef cppclass _9 "9":
|
||||
pass
|
||||
|
||||
cdef cppclass _10 "10":
|
||||
pass
|
||||
|
||||
cdef cppclass _11 "11":
|
||||
pass
|
||||
|
||||
cdef cppclass _12 "12":
|
||||
pass
|
||||
|
||||
cdef cppclass _13 "13":
|
||||
pass
|
||||
|
||||
cdef cppclass _14 "14":
|
||||
pass
|
||||
|
||||
cdef cppclass _15 "15":
|
||||
pass
|
||||
|
||||
cdef cppclass _16 "16":
|
||||
pass
|
||||
|
||||
cdef cppclass _17 "17":
|
||||
pass
|
||||
|
||||
cdef cppclass _18 "18":
|
||||
pass
|
||||
|
||||
cdef cppclass _19 "19":
|
||||
pass
|
||||
|
||||
cdef cppclass _20 "20":
|
||||
pass
|
||||
|
||||
cdef cppclass _21 "21":
|
||||
pass
|
||||
|
||||
cdef cppclass _22 "22":
|
||||
pass
|
||||
|
||||
cdef cppclass _23 "23":
|
||||
pass
|
||||
|
||||
cdef cppclass _24 "24":
|
||||
pass
|
||||
|
||||
cdef cppclass _25 "25":
|
||||
pass
|
||||
|
||||
cdef cppclass _26 "26":
|
||||
pass
|
||||
|
||||
cdef cppclass _27 "27":
|
||||
pass
|
||||
|
||||
cdef cppclass _28 "28":
|
||||
pass
|
||||
|
||||
cdef cppclass _29 "29":
|
||||
pass
|
||||
|
||||
cdef cppclass _30 "30":
|
||||
pass
|
||||
|
||||
cdef cppclass _31 "31":
|
||||
pass
|
||||
|
||||
cdef cppclass _32 "32":
|
||||
pass
|
||||
|
||||
cdef cppclass PlainObjectBase:
|
||||
pass
|
||||
|
||||
cdef cppclass Map[DenseTypeShort](PlainObjectBase):
|
||||
Map() except +
|
||||
Map(np.ndarray array) except +
|
||||
|
||||
cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]:
|
||||
FlattenedMap() except +
|
||||
FlattenedMap(np.ndarray array) except +
|
||||
|
||||
cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]:
|
||||
FlattenedMapWithOrder() except +
|
||||
FlattenedMapWithOrder(np.ndarray array) except +
|
||||
|
||||
cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]:
|
||||
FlattenedMapWithStride() except +
|
||||
FlattenedMapWithStride(np.ndarray array) except +
|
||||
|
||||
cdef np.ndarray ndarray_view(PlainObjectBase &)
|
||||
cdef np.ndarray ndarray_copy(PlainObjectBase &)
|
||||
cdef np.ndarray ndarray(PlainObjectBase &)
|
||||
|
||||
|
||||
cdef extern from "eigency_cpp.h" namespace "Eigen":
|
||||
|
||||
cdef cppclass Dynamic:
|
||||
pass
|
||||
|
||||
cdef cppclass RowMajor:
|
||||
pass
|
||||
|
||||
cdef cppclass ColMajor:
|
||||
pass
|
||||
|
||||
cdef cppclass Aligned:
|
||||
pass
|
||||
|
||||
cdef cppclass Unaligned:
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector1i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector2i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector3i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector4i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector1i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector2i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector3i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector4i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVectorXi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix1i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix2i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix3i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix4i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass MatrixXi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector1f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector2f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector3f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector4f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector1f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector2f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector3f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector4f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVectorXf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix1f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix2f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix3f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix4f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass MatrixXf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector1d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector2d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector3d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector4d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector1d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector2d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector3d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector4d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVectorXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix1d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix2d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix3d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix4d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass MatrixXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector1cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector2cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector3cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector4cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector1cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector2cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector3cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector4cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVectorXcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix1cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix2cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix3cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix4cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass MatrixXcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector1cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector2cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector3cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Vector4cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass VectorXcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector1cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector2cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector3cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVector4cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass RowVectorXcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix1cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix2cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix3cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Matrix4cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass MatrixXcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array22i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array23i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array24i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2Xi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array32i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array33i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array34i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3Xi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array42i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array43i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array44i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4Xi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX2i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX3i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX4i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXXi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4i(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXi(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array22f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array23f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array24f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2Xf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array32f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array33f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array34f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3Xf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array42f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array43f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array44f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4Xf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX2f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX3f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX4f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXXf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4f(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array22d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array23d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array24d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2Xd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array32d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array33d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array34d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3Xd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array42d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array43d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array44d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4Xd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX2d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX3d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX4d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4d(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array22cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array23cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array24cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2Xcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array32cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array33cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array34cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3Xcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array42cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array43cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array44cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4Xcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX2cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX3cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX4cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXXcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4cf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXcf(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array22cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array23cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array24cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2Xcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array32cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array33cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array34cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3Xcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array42cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array43cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array44cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4Xcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX2cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX3cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayX4cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXXcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array2cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array3cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass Array4cd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
cdef cppclass ArrayXcd(PlainObjectBase):
|
||||
pass
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,504 @@
|
|||
#include <Eigen/Dense>
|
||||
|
||||
#include <iostream>
|
||||
#include <stdexcept>
|
||||
#include <complex>
|
||||
|
||||
typedef ::std::complex< double > __pyx_t_double_complex;
|
||||
typedef ::std::complex< float > __pyx_t_float_complex;
|
||||
|
||||
#include "conversions_api.h"
|
||||
|
||||
#ifndef EIGENCY_CPP
|
||||
#define EIGENCY_CPP
|
||||
|
||||
namespace eigency {
|
||||
|
||||
template<typename Scalar>
|
||||
inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
|
||||
template<typename Scalar>
|
||||
inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
|
||||
|
||||
// Strides:
|
||||
// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and
|
||||
// inner strides, which are dependent on whether the array/matrix is row-major of column-major:
|
||||
// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major).
|
||||
// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major).
|
||||
// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented.
|
||||
// Consequently, a switch in numpy storage order from row-major to column-major involves a switch
|
||||
// in strides, while it does not affect the stride in Eigen.
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<double>(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major) {
|
||||
// Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride
|
||||
// If no stride is given, the row_stride is set to the number of columns.
|
||||
return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
} else {
|
||||
// Eigen column-major mode: row_stride=outer_stride, and col_stride=inner_stride
|
||||
// If no stride is given, the cow_stride is set to the number of rows.
|
||||
return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<double>(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<float>(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<float>(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<long>(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<long>(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<unsigned long>(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<unsigned long>(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<int>(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<int>(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<unsigned int>(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<unsigned int>(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<short>(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<short>(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<unsigned short>(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<unsigned short>(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<signed char>(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<signed char>(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<unsigned char>(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<unsigned char>(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<std::complex<double> >(std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<std::complex<double> >(const std::complex<double> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_view<std::complex<float> >(std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
template<>
|
||||
inline PyArrayObject *_ndarray_copy<std::complex<float> >(const std::complex<float> *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
|
||||
if (is_row_major)
|
||||
return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
|
||||
else
|
||||
return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
|
||||
}
|
||||
|
||||
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
// If C++11 is available, check if m is an r-value reference, in
|
||||
// which case a copy should always be made
|
||||
#if __cplusplus >= 201103L
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray(Eigen::PlainObjectBase<Derived> &&m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
#endif
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray(const Eigen::PlainObjectBase<Derived> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase<Derived> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase<Derived> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
template <typename Derived>
|
||||
inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase<Derived> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
|
||||
}
|
||||
|
||||
template <typename Derived, int MapOptions, typename Stride>
|
||||
inline PyArrayObject *ndarray(Eigen::Map<Derived, MapOptions, Stride> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
|
||||
}
|
||||
template <typename Derived, int MapOptions, typename Stride>
|
||||
inline PyArrayObject *ndarray(const Eigen::Map<Derived, MapOptions, Stride> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
// Since this is a map, we assume that ownership is correctly taken care
|
||||
// of, and we avoid taking a copy
|
||||
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
|
||||
}
|
||||
template <typename Derived, int MapOptions, typename Stride>
|
||||
inline PyArrayObject *ndarray_view(Eigen::Map<Derived, MapOptions, Stride> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
|
||||
}
|
||||
template <typename Derived, int MapOptions, typename Stride>
|
||||
inline PyArrayObject *ndarray_view(const Eigen::Map<Derived, MapOptions, Stride> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_view(const_cast<typename Derived::Scalar*>(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
|
||||
}
|
||||
template <typename Derived, int MapOptions, typename Stride>
|
||||
inline PyArrayObject *ndarray_copy(const Eigen::Map<Derived, MapOptions, Stride> &m) {
|
||||
import_gtsam_eigency__conversions();
|
||||
return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
|
||||
}
|
||||
|
||||
|
||||
template <typename MatrixType,
|
||||
int _MapOptions = Eigen::Unaligned,
|
||||
typename _StrideType=Eigen::Stride<0,0> >
|
||||
class MapBase: public Eigen::Map<MatrixType, _MapOptions, _StrideType> {
|
||||
public:
|
||||
typedef Eigen::Map<MatrixType, _MapOptions, _StrideType> Base;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
|
||||
MapBase(Scalar* data,
|
||||
long rows,
|
||||
long cols,
|
||||
_StrideType stride=_StrideType())
|
||||
: Base(data,
|
||||
// If both dimensions are dynamic or dimensions match, accept dimensions as they are
|
||||
((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
|
||||
(Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
|
||||
? rows
|
||||
// otherwise, test if swapping them makes them fit
|
||||
: ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
|
||||
? cols
|
||||
: rows),
|
||||
((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
|
||||
(Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
|
||||
? cols
|
||||
: ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
|
||||
? rows
|
||||
: cols),
|
||||
stride
|
||||
) {}
|
||||
|
||||
MapBase &operator=(const MatrixType &other) {
|
||||
Base::operator=(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
virtual ~MapBase() { }
|
||||
};
|
||||
|
||||
|
||||
template <template<class,int,int,int,int,int> class EigencyDenseBase,
|
||||
typename Scalar,
|
||||
int _Rows, int _Cols,
|
||||
int _Options = Eigen::AutoAlign |
|
||||
#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
|
||||
// workaround a bug in at least gcc 3.4.6
|
||||
// the innermost ?: ternary operator is misparsed. We write it slightly
|
||||
// differently and this makes gcc 3.4.6 happy, but it's ugly.
|
||||
// The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
|
||||
// (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
|
||||
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
|
||||
// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
|
||||
#if EIGEN_VERSION_AT_LEAST(3,2,90)
|
||||
: !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
|
||||
#else
|
||||
: !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
|
||||
#endif
|
||||
: ColMajor ),
|
||||
#else
|
||||
( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
|
||||
: (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
|
||||
// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
|
||||
#if EIGEN_VERSION_AT_LEAST(3,2,90)
|
||||
: EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
|
||||
#else
|
||||
: Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
|
||||
#endif
|
||||
#endif
|
||||
int _MapOptions = Eigen::Unaligned,
|
||||
int _StrideOuter=0, int _StrideInner=0,
|
||||
int _MaxRows = _Rows,
|
||||
int _MaxCols = _Cols>
|
||||
class FlattenedMap: public MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > {
|
||||
public:
|
||||
typedef MapBase<EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base;
|
||||
|
||||
FlattenedMap()
|
||||
: Base(NULL, 0, 0),
|
||||
object_(NULL) {}
|
||||
|
||||
FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0)
|
||||
: Base(data, rows, cols,
|
||||
Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)),
|
||||
object_(NULL) {
|
||||
}
|
||||
|
||||
FlattenedMap(PyArrayObject *object)
|
||||
: Base((Scalar *)((PyArrayObject*)object)->data,
|
||||
// : Base(_from_numpy<Scalar>((PyArrayObject*)object),
|
||||
(((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
|
||||
(((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0],
|
||||
Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
|
||||
_StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])),
|
||||
object_(object) {
|
||||
|
||||
if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
|
||||
throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map.");
|
||||
|
||||
Py_XINCREF(object_);
|
||||
}
|
||||
FlattenedMap &operator=(const FlattenedMap &other) {
|
||||
if (other.object_) {
|
||||
new (this) FlattenedMap(other.object_);
|
||||
} else {
|
||||
// Replace the memory that we point to (not a memory allocation)
|
||||
new (this) FlattenedMap(const_cast<Scalar*>(other.data()),
|
||||
other.rows(),
|
||||
other.cols(),
|
||||
other.outerStride(),
|
||||
other.innerStride());
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
operator Base() const {
|
||||
return static_cast<Base>(*this);
|
||||
}
|
||||
|
||||
operator Base&() const {
|
||||
return static_cast<Base&>(*this);
|
||||
}
|
||||
|
||||
operator EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>() const {
|
||||
return EigencyDenseBase<Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>(static_cast<Base>(*this));
|
||||
}
|
||||
|
||||
virtual ~FlattenedMap() {
|
||||
Py_XDECREF(object_);
|
||||
}
|
||||
|
||||
private:
|
||||
PyArrayObject * const object_;
|
||||
};
|
||||
|
||||
|
||||
template <typename MatrixType>
|
||||
class Map: public MapBase<MatrixType> {
|
||||
public:
|
||||
typedef MapBase<MatrixType> Base;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = Base::Base::RowsAtCompileTime,
|
||||
ColsAtCompileTime = Base::Base::ColsAtCompileTime
|
||||
};
|
||||
|
||||
Map()
|
||||
: Base(NULL,
|
||||
(RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime,
|
||||
(ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime),
|
||||
object_(NULL) {
|
||||
}
|
||||
|
||||
Map(Scalar *data, long rows, long cols)
|
||||
: Base(data, rows, cols),
|
||||
object_(NULL) {}
|
||||
|
||||
Map(PyArrayObject *object)
|
||||
: Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data,
|
||||
// ROW: If array is in row-major order, transpose (see README)
|
||||
(PyObject*)object == Py_None? 0 :
|
||||
(!PyArray_IS_F_CONTIGUOUS(object)
|
||||
? ((object->nd == 1)
|
||||
? 1 // ROW: If 1D row-major numpy array, set to 1 (row vector)
|
||||
: object->dimensions[1])
|
||||
: object->dimensions[0]),
|
||||
// COLUMN: If array is in row-major order: transpose (see README)
|
||||
(PyObject*)object == Py_None? 0 :
|
||||
(!PyArray_IS_F_CONTIGUOUS(object)
|
||||
? object->dimensions[0]
|
||||
: ((object->nd == 1)
|
||||
? 1 // COLUMN: If 1D col-major numpy array, set to length (column vector)
|
||||
: object->dimensions[1]))),
|
||||
object_(object) {
|
||||
|
||||
if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
|
||||
throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map.");
|
||||
Py_XINCREF(object_);
|
||||
}
|
||||
|
||||
Map &operator=(const Map &other) {
|
||||
if (other.object_) {
|
||||
new (this) Map(other.object_);
|
||||
} else {
|
||||
// Replace the memory that we point to (not a memory allocation)
|
||||
new (this) Map(const_cast<Scalar*>(other.data()),
|
||||
other.rows(),
|
||||
other.cols());
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
Map &operator=(const MatrixType &other) {
|
||||
MapBase<MatrixType>::operator=(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
operator Base() const {
|
||||
return static_cast<Base>(*this);
|
||||
}
|
||||
|
||||
operator Base&() const {
|
||||
return static_cast<Base&>(*this);
|
||||
}
|
||||
|
||||
operator MatrixType() const {
|
||||
return MatrixType(static_cast<Base>(*this));
|
||||
}
|
||||
|
||||
virtual ~Map() {
|
||||
Py_XDECREF(object_);
|
||||
}
|
||||
|
||||
private:
|
||||
PyArrayObject * const object_;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1 @@
|
|||
from .gtsam_unstable import *
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Demonstration of the fixed-lag smoothers using a planar robot example
|
||||
and multiple odometry-like sensors
|
||||
Author: Frank Dellaert (C++), Jeremy Aguilon (Python)
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam_unstable
|
||||
|
||||
|
||||
def _timestamp_key_value(key, value):
    """Wrap a (key, timestamp) pair for insertion into a
    FixedLagSmootherKeyTimestampMap.

    Args:
        key: integer key of the variable being timestamped.
        value: timestamp (seconds) associated with the key.

    Returns:
        gtsam_unstable.FixedLagSmootherKeyTimestampMapValue wrapping the pair.
    """
    return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue(
        key, value
    )
|
||||
|
||||
|
||||
def BatchFixedLagSmootherExample():
    """
    Runs a batch fixed-lag smoother on an agent with two odometry-like
    sensors that is simply moving along the x axis at constant speed.
    Poses older than the lag window (2 s) are marginalized out.
    """

    # Define a batch fixed lag smoother, which uses
    # Levenberg-Marquardt to perform the nonlinear optimization
    lag = 2.0
    smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)

    # Create containers to store the factors and linearization points
    # that will be sent to the smoothers
    new_factors = gtsam.NonlinearFactorGraph()
    new_values = gtsam.Values()
    new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

    # Create a prior on the first pose, placing it at the origin
    prior_mean = gtsam.Pose2(0, 0, 0)
    prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
    X1 = 0
    new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise))
    new_values.insert(X1, prior_mean)
    new_timestamps.insert(_timestamp_key_value(X1, 0.0))

    delta_time = 0.25
    time = 0.25

    while time <= 3.0:
        # Keys must be integral. time is always an exact multiple of 0.25
        # (exactly representable in binary), so int() here is lossless.
        previous_key = int(1000 * (time - delta_time))
        current_key = int(1000 * time)

        # assign current key to the current timestamp
        new_timestamps.insert(_timestamp_key_value(current_key, time))

        # Add a guess for this pose to the new values
        # Assume that the robot moves at 2 m/s. Position is time[s] * 2[m/s]
        current_pose = gtsam.Pose2(time * 2, 0, 0)
        new_values.insert(current_key, current_pose)

        # Add odometry factors from two different sources with different
        # error stats
        odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
        odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.1, 0.1, 0.05]))
        new_factors.push_back(gtsam.BetweenFactorPose2(
            previous_key, current_key, odometry_measurement_1, odometry_noise_1
        ))

        odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
        odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.05, 0.05, 0.05]))
        new_factors.push_back(gtsam.BetweenFactorPose2(
            previous_key, current_key, odometry_measurement_2, odometry_noise_2
        ))

        # Update the smoothers with the new factors. In this case,
        # one iteration must pass for Levenberg-Marquardt to accurately
        # estimate. Skip updates until enough poses exist (t >= 0.5 s);
        # clearing earlier would discard the prior before it is used.
        if time >= 0.50:
            smoother_batch.update(new_factors, new_values, new_timestamps)
            print("Timestamp = " + str(time) + ", Key = " + str(current_key))
            print(smoother_batch.calculateEstimatePose2(current_key))

            # The containers were consumed by update(); reset them for
            # the next iteration.
            new_timestamps.clear()
            new_values.clear()
            new_factors.resize(0)

        time += delta_time
|
||||
if __name__ == '__main__':
    # Run the demo when invoked as a script.
    BatchFixedLagSmootherExample()
    print("Example complete")
||||
|
|
@ -0,0 +1,135 @@
|
|||
"""
|
||||
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
Atlanta, Georgia 30332-0415
|
||||
All Rights Reserved
|
||||
|
||||
See LICENSE for the license information
|
||||
|
||||
Cal3Unified unit tests.
|
||||
Author: Frank Dellaert & Duy Nguyen Ta (Python)
|
||||
"""
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
import gtsam_unstable
|
||||
from gtsam.utils.test_case import GtsamTestCase
|
||||
|
||||
|
||||
def _timestamp_key_value(key, value):
    """Wrap a (key, timestamp) pair for insertion into a
    FixedLagSmootherKeyTimestampMap."""
    return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue(
        key, value
    )
|
||||
|
||||
|
||||
class TestFixedLagSmootherExample(GtsamTestCase):
    '''
    Tests the fixed lag smoother wrapper.
    '''

    def test_FixedLagSmootherExample(self):
        '''
        Checks that the Python implementation produces the same estimates
        as the C++ example file
        gtsam_unstable/examples/FixedLagSmootherExample.cpp
        '''
        # Batch fixed-lag smoother: Levenberg-Marquardt over a 2 s window.
        lag = 2.0
        smoother_batch = gtsam_unstable.BatchFixedLagSmoother(lag)

        # Containers holding the factors and linearization points that
        # will be handed to the smoother on each update.
        new_factors = gtsam.NonlinearFactorGraph()
        new_values = gtsam.Values()
        new_timestamps = gtsam_unstable.FixedLagSmootherKeyTimestampMap()

        # Prior anchoring the first pose at the origin.
        prior_mean = gtsam.Pose2(0, 0, 0)
        prior_noise = gtsam.noiseModel_Diagonal.Sigmas(
            np.array([0.3, 0.3, 0.1]))
        X1 = 0
        new_factors.push_back(
            gtsam.PriorFactorPose2(
                X1, prior_mean, prior_noise))
        new_values.insert(X1, prior_mean)
        new_timestamps.insert(_timestamp_key_value(X1, 0.0))

        delta_time = 0.25
        time = 0.25

        gt_index = 0

        # Expected smoothed poses, one per update from t = 0.5 s onward.
        ground_truth = [
            gtsam.Pose2(0.995821, 0.0231012, 0.0300001),
            gtsam.Pose2(1.49284, 0.0457247, 0.045),
            gtsam.Pose2(1.98981, 0.0758879, 0.06),
            gtsam.Pose2(2.48627, 0.113502, 0.075),
            gtsam.Pose2(2.98211, 0.158558, 0.09),
            gtsam.Pose2(3.47722, 0.211047, 0.105),
            gtsam.Pose2(3.97149, 0.270956, 0.12),
            gtsam.Pose2(4.4648, 0.338272, 0.135),
            gtsam.Pose2(4.95705, 0.41298, 0.15),
            gtsam.Pose2(5.44812, 0.495063, 0.165),
            gtsam.Pose2(5.9379, 0.584503, 0.18),
        ]

        # March from 0.25 s to 3.0 s in 0.25 s steps. Each step the agent
        # moves at constant speed and both odometers measure the change;
        # the smoothed result is compared against the ground truth.
        while time <= 3.0:
            previous_key = 1000 * (time - delta_time)
            current_key = 1000 * time

            # assign current key to the current timestamp
            new_timestamps.insert(_timestamp_key_value(current_key, time))

            # Initial guess for this pose: the robot moves at 2 m/s,
            # so position is time[s] * 2[m/s].
            current_pose = gtsam.Pose2(time * 2, 0, 0)
            new_values.insert(current_key, current_pose)

            # Odometry factors from two sources with different error stats.
            odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02)
            odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.1, 0.1, 0.05]))
            new_factors.push_back(
                gtsam.BetweenFactorPose2(
                    previous_key,
                    current_key,
                    odometry_measurement_1,
                    odometry_noise_1))

            odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01)
            odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas(
                np.array([0.05, 0.05, 0.05]))
            new_factors.push_back(
                gtsam.BetweenFactorPose2(
                    previous_key,
                    current_key,
                    odometry_measurement_2,
                    odometry_noise_2))

            # One Levenberg-Marquardt update per step once enough poses
            # exist (t >= 0.5 s); then check against the expected pose.
            if time >= 0.50:
                smoother_batch.update(new_factors, new_values, new_timestamps)

                estimate = smoother_batch.calculateEstimatePose2(current_key)
                self.assertTrue(estimate.equals(ground_truth[gt_index], 1e-4))
                gt_index += 1

                new_timestamps.clear()
                new_values.clear()
                new_factors.resize(0)

            time += delta_time
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
Cython>=0.25.2
|
||||
backports_abc>=0.5
|
||||
numpy>=1.12.0
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
import os
import sys
try:
    from setuptools import setup, find_packages
except ImportError:
    # Fallback for environments without setuptools. distutils.core
    # provides setup() but has NO find_packages, so importing it from
    # there (as before) raised ImportError and defeated the fallback.
    from distutils.core import setup

    def find_packages():
        """Minimal setuptools.find_packages() replacement: any directory
        under the current one containing an __init__.py is a package."""
        found = []
        for root, _dirs, files in os.walk('.'):
            if '__init__.py' in files:
                found.append(os.path.relpath(root).replace(os.path.sep, '.'))
        return found

# Refuse to run from anywhere but the install tree: the extension
# modules packaged below only exist after `make install`.
if 'SETUP_PY_NO_CHECK' not in os.environ:
    script_path = os.path.abspath(os.path.realpath(__file__))
    install_path = os.path.abspath(os.path.realpath(os.path.join('${GTSAM_CYTHON_INSTALL_PATH}', 'setup.py')))
    if script_path != install_path:
        print('setup.py is being run from an unexpected location: "{}"'.format(script_path))
        print('please run `make install` and run the script from there')
        sys.exit(1)


packages = find_packages()

setup(
    name='gtsam',
    description='Georgia Tech Smoothing And Mapping library',
    url='https://bitbucket.org/gtborg/gtsam',
    version='${GTSAM_VERSION_STRING}',  # https://www.python.org/dev/peps/pep-0440/
    license='Simplified BSD license',
    keywords='slam sam robotics localization mapping optimization',
    long_description='''${README_CONTENTS}''',
    # https://pypi.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Education',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],

    packages=packages,
    # Ship only the compiled extension modules found in each package dir.
    package_data={package:
        [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.dll')]
        for package in packages
    },
    # Requirements are injected by CMake; skip blanks and comment lines.
    install_requires=[line.strip() for line in '''
${CYTHON_INSTALL_REQUIREMENTS}
'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
)
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
// add unary measurement factors, like GPS, on all three poses
|
||||
noiseModel::Diagonal::shared_ptr unaryNoise =
|
||||
noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
|
||||
graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
|
||||
graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue