Merge branch 'release/4.0.3'

release/4.3a0
Fan Jiang 2021-12-20 16:42:04 -05:00
commit d5f256f3e2
510 changed files with 18246 additions and 6118 deletions

.github/workflows/trigger-python.yml

@ -0,0 +1,14 @@
# This triggers Cython builds on `gtsam-manylinux-build`
name: Trigger Python Builds
on: push
jobs:
triggerCython:
runs-on: ubuntu-latest
steps:
- name: Repository Dispatch
uses: ProfFan/repository-dispatch@master
with:
token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }}
repository: borglab/gtsam-manylinux-build
event-type: cython-wrapper
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
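
For reference, the dispatch above can also be sent by hand through the GitHub REST API. A minimal Python sketch (the token environment variable name and the ref/sha values are illustrative assumptions, not part of this workflow):

```python
import json
import os
import urllib.request

# Hypothetical: a personal access token with repo scope, exported as GITHUB_TOKEN
token = os.environ["GITHUB_TOKEN"]

payload = {
    "event_type": "cython-wrapper",
    "client_payload": {"ref": "refs/heads/develop", "sha": "d5f256f3e2"},
}
request = urllib.request.Request(
    "https://api.github.com/repos/borglab/gtsam-manylinux-build/dispatches",
    data=json.dumps(payload).encode(),
    headers={
        "Authorization": "token " + token,
        "Accept": "application/vnd.github.v3+json",
    },
)
with urllib.request.urlopen(request) as response:
    print(response.status)  # 204 (No Content) indicates the event was accepted
```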

.travis.python.sh

@ -0,0 +1,43 @@
#!/bin/bash
set -x -e
if [ -z ${PYTHON_VERSION+x} ]; then
echo "Please provide the Python version to build against!"
exit 127
fi
PYTHON="python${PYTHON_VERSION}"
if [[ $(uname) == "Darwin" ]]; then
brew install wget
else
# Install system packages required by the Python build
sudo apt-get install wget libicu-dev python3-pip python3-setuptools
fi
CURRDIR=$(pwd)
sudo $PYTHON -m pip install -r ./cython/requirements.txt
mkdir $CURRDIR/build
cd $CURRDIR/build
cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
-DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
-DGTSAM_USE_QUATERNIONS=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
-DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \
-DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install
make -j$(nproc) install
cd cython
sudo $PYTHON setup.py install
cd $CURRDIR/cython/gtsam/tests
$PYTHON -m unittest discover
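
The last step discovers and runs the Python unit tests shipped under `cython/gtsam/tests`. As a rough illustration of the kind of test that discovery picks up (a minimal sketch, not a test taken from the repository):

```python
import unittest

import numpy as np

import gtsam


class TestPose2Compose(unittest.TestCase):
    def test_compose_with_inverse(self):
        # Composing a pose with its inverse should return the identity pose.
        pose = gtsam.Pose2(1.0, 2.0, np.pi / 2)
        composed = pose.compose(pose.inverse())
        self.assertTrue(composed.equals(gtsam.Pose2(), 1e-9))


if __name__ == "__main__":
    unittest.main()
```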


@ -1,7 +1,39 @@
#!/bin/bash
# install TBB with _debug.so files
function install_tbb()
{
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
TBB_VERSION=4.4.2
TBB_DIR=tbb44_20151115oss
TBB_SAVEPATH="/tmp/tbb.tgz"
if [ "$TRAVIS_OS_NAME" == "linux" ]; then
OS_SHORT="lin"
TBB_LIB_DIR="intel64/gcc4.4"
SUDO="sudo"
elif [ "$TRAVIS_OS_NAME" == "osx" ]; then
OS_SHORT="lin"
TBB_LIB_DIR=""
SUDO=""
fi
wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
tar -C /tmp -xf $TBB_SAVEPATH
TBBROOT=/tmp/$TBB_DIR
# Copy the needed files to the correct places.
# This works correctly for travis builds, instead of setting path variables.
# This is what Homebrew does to install TBB on Macs
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
}
# common tasks before either build or test
function prepare ()
function configure()
{
set -e # Make sure any error makes the script to return an error code
set -x # echo
@ -14,21 +46,27 @@ function prepare ()
rm -fr $BUILD_DIR || true
mkdir $BUILD_DIR && cd $BUILD_DIR
if [ -z "$CMAKE_BUILD_TYPE" ]; then
CMAKE_BUILD_TYPE=Debug
fi
if [ -z "$GTSAM_ALLOW_DEPRECATED_SINCE_V4" ]; then
GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
fi
install_tbb
if [ ! -z "$GCC_VERSION" ]; then
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 60 \
--slave /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION
sudo update-alternatives --set gcc /usr/bin/gcc-$GCC_VERSION
export CC=gcc-$GCC_VERSION
export CXX=g++-$GCC_VERSION
fi
# GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DCMAKE_VERBOSE_MAKEFILE=OFF
}
# common tasks after either build or test
function finish ()
{
@ -41,17 +79,12 @@ function finish ()
# compile the code with the intent of populating the cache
function build ()
{
prepare
export GTSAM_BUILD_EXAMPLES_ALWAYS=ON
export GTSAM_BUILD_TESTS=OFF
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=ON \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=$GTSAM_ALLOW_DEPRECATED_SINCE_V4
configure
# Actual build:
VERBOSE=1 make -j2
make -j2
finish
}
@ -59,14 +92,10 @@ function build ()
# run the tests
function test ()
{
prepare
export GTSAM_BUILD_EXAMPLES_ALWAYS=OFF
export GTSAM_BUILD_TESTS=ON
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-DGTSAM_BUILD_TESTS=ON \
-DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
configure
# Actual build:
make -j2 check
@ -79,7 +108,7 @@ case $1 in
-b)
build
;;
-t)
test
;;
esac


@ -7,17 +7,19 @@ addons:
apt:
sources:
- ubuntu-toolchain-r-test
- sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main'
key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
packages:
- g++-8
- clang-3.8
- build-essential
- pkg-config
- g++-9
- clang-9
- build-essential pkg-config
- cmake
- libpython-dev python-numpy
- python3-dev libpython-dev
- python3-numpy
- libboost-all-dev
# before_install:
# - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update ; fi
# - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi
@ -27,9 +29,16 @@ install:
stages:
- compile
- test
- special
env:
global:
- MAKEFLAGS="-j2"
- CCACHE_SLOPPINESS=pch_defines,time_macros
# Compile stage without building examples/tests to populate the caches.
jobs:
# -------- STAGE 1: COMPILE -----------
include:
# on Mac, GCC
- stage: compile
@ -68,46 +77,65 @@ jobs:
- stage: compile
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, with deprecated ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
script: bash .travis.sh -b
# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests
- stage: special
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON
script: bash .travis.sh -t
# -------- STAGE 2: TESTS -----------
# on Mac, GCC
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, with deprecated ON to make sure that path still compiles
- stage: compile
script: bash .travis.sh -t
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
script: bash .travis.sh -b
# Matrix configuration:
os:
- osx
- linux
compiler:
- gcc
- clang
env:
global:
- MAKEFLAGS="-j2"
- CCACHE_SLOPPINESS=pch_defines,time_macros
- GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
- GTSAM_BUILD_UNSTABLE=ON
matrix:
- CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- CMAKE_BUILD_TYPE=Release
script:
- bash .travis.sh -t
matrix:
exclude:
# Exclude g++ debug on Linux as it consistently times out
- os: linux
compiler: gcc
env : CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
# Exclude clang on Linux/clang in release until issue #57 is solved
- os: linux
compiler: clang
env : CMAKE_BUILD_TYPE=Release
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
# on Linux, with quaternions ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
script: bash .travis.sh -t
- stage: special
os: linux
compiler: gcc
env: PYTHON_VERSION=3
script: bash .travis.python.sh
- stage: special
os: osx
compiler: clang
env: PYTHON_VERSION=3
script: bash .travis.python.sh


@ -10,7 +10,7 @@ endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 0)
set (GTSAM_VERSION_PATCH 2)
set (GTSAM_VERSION_PATCH 3)
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
@ -22,6 +22,7 @@ set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(GtsamMakeConfigFile)
include(GNUInstallDirs)
# Record the root dir for gtsam - needed during external builds, e.g., ROS
set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
@ -67,13 +68,13 @@ if(GTSAM_UNSTABLE_AVAILABLE)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
@ -88,7 +89,7 @@ if(NOT MSVC AND NOT XCODE_VERSION)
# Set the GTSAM_BUILD_TAG variable.
# If build type is Release, set to blank (""), else set to the build type.
if("${CMAKE_BUILD_TYPE_UPPER}" STREQUAL "RELEASE")
if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE")
set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory
else()
set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}")
@ -171,63 +172,45 @@ if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILE
message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
endif()
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# JLBC: This was once updated to target-based names (Boost::xxx), but it caused
# problems with Boost versions newer than FindBoost.cmake was prepared to handle,
# so we downgraded this to classic filenames-based variables, and manually adding
# the target_include_directories(xxx ${Boost_INCLUDE_DIR})
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES
optimized ${Boost_SERIALIZATION_LIBRARY_RELEASE}
optimized ${Boost_SYSTEM_LIBRARY_RELEASE}
optimized ${Boost_FILESYSTEM_LIBRARY_RELEASE}
optimized ${Boost_THREAD_LIBRARY_RELEASE}
optimized ${Boost_DATE_TIME_LIBRARY_RELEASE}
optimized ${Boost_REGEX_LIBRARY_RELEASE}
debug ${Boost_SERIALIZATION_LIBRARY_DEBUG}
debug ${Boost_SYSTEM_LIBRARY_DEBUG}
debug ${Boost_FILESYSTEM_LIBRARY_DEBUG}
debug ${Boost_THREAD_LIBRARY_DEBUG}
debug ${Boost_DATE_TIME_LIBRARY_DEBUG}
debug ${Boost_REGEX_LIBRARY_DEBUG}
Boost::serialization
Boost::system
Boost::filesystem
Boost::thread
Boost::date_time
Boost::regex
)
message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}")
if (GTSAM_DISABLE_NEW_TIMERS)
message("WARNING: GTSAM timing instrumentation manually disabled")
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
else()
if(Boost_TIMER_LIBRARY)
list(APPEND GTSAM_BOOST_LIBRARIES
optimized ${Boost_TIMER_LIBRARY_RELEASE}
optimized ${Boost_CHRONO_LIBRARY_RELEASE}
debug ${Boost_TIMER_LIBRARY_DEBUG}
debug ${Boost_CHRONO_LIBRARY_DEBUG}
)
list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
else()
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
endif()
endif()
if(NOT (${Boost_VERSION} LESS 105600))
message("Ignoring Boost restriction on optional lvalue assignment from rvalues")
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_OPTIONAL_ALLOW_BINDING_TO_RVALUES BOOST_OPTIONAL_CONFIG_ALLOW_BINDING_TO_RVALUES)
endif()
###############################################################################
# Find TBB
find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
# Set up variables if we're using TBB
if(TBB_FOUND AND GTSAM_WITH_TBB)
set(GTSAM_USE_TBB 1) # This will go into config.h
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
set(GTSAM_USE_TBB 1) # This will go into config.h
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
else()
set(TBB_GREATER_EQUAL_2020 0)
endif()
# all definitions and link requisites will go via imported targets:
# tbb & tbbmalloc
list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
else()
set(GTSAM_USE_TBB 0) # This will go into config.h
endif()
###############################################################################
@ -362,6 +345,11 @@ if (MSVC)
list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
endif()
if (APPLE AND BUILD_SHARED_LIBS)
# Set the default install directory on macOS
set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
endif()
###############################################################################
# Global compile options
@ -434,6 +422,10 @@ add_subdirectory(CppUnitLite)
# Build wrap
if (GTSAM_BUILD_WRAP)
add_subdirectory(wrap)
# Suppress -Wmisleading-indentation warnings triggered by long Cython-generated lines
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation")
endif()
endif(GTSAM_BUILD_WRAP)
# Build GTSAM library
@ -451,7 +443,7 @@ add_subdirectory(timing)
# Build gtsam_unstable
if (GTSAM_BUILD_UNSTABLE)
add_subdirectory(gtsam_unstable)
endif(GTSAM_BUILD_UNSTABLE)
endif()
# Matlab toolbox
if (GTSAM_INSTALL_MATLAB_TOOLBOX)
@ -462,12 +454,11 @@ endif()
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
# Set up cache options
set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
if(NOT GTSAM_CYTHON_INSTALL_PATH)
set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
endif()
# Cython install path appended with Build type (e.g. cython, cythonDebug, etc).
# This does not override custom values set from the command line
set(GTSAM_CYTHON_INSTALL_PATH "${PROJECT_BINARY_DIR}/cython${GTSAM_BUILD_TAG}" CACHE PATH "Cython toolbox destination, blank defaults to PROJECT_BINARY_DIR/cython<GTSAM_BUILD_TAG>")
set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
add_subdirectory(cython)
add_subdirectory(cython ${GTSAM_CYTHON_INSTALL_PATH})
else()
set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
endif()
@ -545,48 +536,54 @@ endif()
print_build_options_for_target(gtsam)
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
if(GTSAM_USE_TBB)
message(STATUS " Use Intel TBB : Yes")
message(STATUS " Use Intel TBB : Yes")
elseif(TBB_FOUND)
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
else()
message(STATUS " Use Intel TBB : TBB not found")
message(STATUS " Use Intel TBB : TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
message(STATUS " Eigen will use MKL : Yes")
message(STATUS " Eigen will use MKL : Yes")
elseif(MKL_FOUND)
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
message(STATUS " Eigen will use MKL : MKL not found")
message(STATUS " Eigen will use MKL : MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
message(STATUS " Eigen will use MKL and OpenMP : Yes")
message(STATUS " Eigen will use MKL and OpenMP : Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
message(STATUS " Cheirality exceptions enabled : YES")
else()
message(STATUS " Cheirality exceptions enabled : NO")
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
message(STATUS " Build with ccache : Yes")
message(STATUS " Build with ccache : Yes")
elseif(CCACHE_FOUND)
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
message(STATUS " Build with ccache : No")
message(STATUS " Build with ccache : No")
endif()
endif()
message(STATUS "Packaging flags ")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
message(STATUS "GTSAM flags ")
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
@ -597,15 +594,19 @@ print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 al
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
message(STATUS "MATLAB toolbox flags ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
message(STATUS " MATLAB root : ${MATLAB_ROOT}")
message(STATUS " MEX binary : ${MEX_COMMAND}")
endif()
message(STATUS "Cython toolbox flags ")
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
endif()
message(STATUS "===============================================================")


@ -6,12 +6,12 @@ file(GLOB cppunitlite_src "*.cpp")
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h
target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h
gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure
option(GTSAM_INSTALL_CPPUNITLITE "Enable/Disable installation of CppUnitLite library" ON)
if (GTSAM_INSTALL_CPPUNITLITE)
install(FILES ${cppunitlite_headers} DESTINATION include/CppUnitLite)
install(TARGETS CppUnitLite EXPORT GTSAM-exports ARCHIVE DESTINATION lib)
install(FILES ${cppunitlite_headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/CppUnitLite)
install(TARGETS CppUnitLite EXPORT GTSAM-exports ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif(GTSAM_INSTALL_CPPUNITLITE)


@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp
However, we now also need to be able to evaluate the derivatives of compose and inverse.
Hence, we have the following extra valid static functions defined in the struct `gtsam::traits<T>`:
* `r = traits<T>::Compose(p,q,Hq,Hp)`
* `r = traits<T>::Compose(p,q,Hp,Hq)`
* `q = traits<T>::Inverse(p,Hp)`
* `r = traits<T>::Between(p,q,Hq,H2p)`
* `r = traits<T>::Between(p,q,Hp,Hq)`
where above the *H* arguments stand for optional Jacobian arguments.
That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
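
In the Python wrapper these group operations appear as ordinary methods, and the PriorFactor/BetweenFactor templates mentioned above are instantiated per type (PriorFactorPose2, BetweenFactorPose2, and so on). A minimal sketch using Pose2; the optional Jacobian (*H*) arguments are not exposed here:

```python
import gtsam

p = gtsam.Pose2(0.0, 0.0, 0.0)
q = gtsam.Pose2(2.0, 0.0, 0.1)

r = p.compose(q)      # traits<Pose2>::Compose(p, q)
p_inv = p.inverse()   # traits<Pose2>::Inverse(p)
delta = p.between(q)  # traits<Pose2>::Between(p, q)

print(delta.x(), delta.y(), delta.theta())
```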


@ -1,17 +1,20 @@
[![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/)
# README - Georgia Tech Smoothing and Mapping library
# README - Georgia Tech Smoothing and Mapping Library
## What is GTSAM?
GTSAM is a library of C++ classes that implement smoothing and
mapping (SAM) in robotics and vision, using factor graphs and Bayes
networks as the underlying computing paradigm rather than sparse
GTSAM is a C++ library that implements smoothing and
mapping (SAM) in robotics and vision, using Factor Graphs and Bayes
Networks as the underlying computing paradigm rather than sparse
matrices.
On top of the C++ library, GTSAM includes a MATLAB interface (enable
GTSAM_INSTALL_MATLAB_TOOLBOX in CMake to build it). A Python interface
is under development.
| Platform | Build Status |
|:---------:|:-------------:|
| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) |
| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) |
On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers).
## Quickstart
@ -41,9 +44,9 @@ Optional prerequisites - used automatically if findable by CMake:
## GTSAM 4 Compatibility
GTSAM 4 will introduce several new features, most notably Expressions and a python toolbox. We will also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag.
GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. We also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag.
Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we will also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 will be deprecated, so please be aware that this might render functions using their default constructor incorrect.
Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
## Wrappers
@ -62,7 +65,7 @@ Our implementation improves on this using integration on the manifold, as detail
If you are using the factor in academic work, please cite the publications above.
In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in docs/ImuFactor.pdf, is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF.
In GTSAM 4 a new and more efficient implementation, based on integrating on the NavState tangent space and detailed in [this document](doc/ImuFactor.pdf), is enabled by default. To switch to the RSS 2015 version, set the flag **GTSAM_TANGENT_PREINTEGRATION** to OFF.
## Additional Information
@ -79,4 +82,4 @@ GTSAM is open source under the BSD license, see the [`LICENSE`](LICENSE) and [`L
Please see the [`examples/`](examples) directory and the [`USAGE`](USAGE.md) file for examples on how to use GTSAM.
GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS).
GTSAM was developed in the lab of [Frank Dellaert](http://www.cc.gatech.edu/~dellaert) at the [Georgia Institute of Technology](http://www.gatech.edu), with the help of many contributors over the years, see [THANKS](THANKS.md).
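
The IMU preintegration mentioned above can also be exercised from the wrappers. A hedged Python sketch (assuming the Cython toolbox is installed; the noise values and measurements are illustrative only):

```python
import numpy as np

import gtsam

# Preintegration parameters for a Z-up navigation frame (gravity magnitude 9.81)
params = gtsam.PreintegrationParams.MakeSharedU(9.81)
params.setAccelerometerCovariance(np.eye(3) * 0.01)
params.setGyroscopeCovariance(np.eye(3) * 0.001)
params.setIntegrationCovariance(np.eye(3) * 1e-7)

bias = gtsam.imuBias_ConstantBias()  # zero bias
pim = gtsam.PreintegratedImuMeasurements(params, bias)

# Integrate one second of synthetic, stationary IMU data at 100 Hz
for _ in range(100):
    measured_acc = np.array([0.0, 0.0, 9.81])  # specific force opposing gravity
    measured_omega = np.zeros(3)
    pim.integrateMeasurement(measured_acc, measured_omega, 0.01)

# The preintegrated measurements connect two navigation states and a bias:
# keys: 0 = pose_i, 1 = vel_i, 2 = pose_j, 3 = vel_j, 4 = bias
factor = gtsam.ImuFactor(0, 1, 2, 3, 4, pim)
```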

appveyor.yml

@ -0,0 +1,33 @@
# version format
version: 4.0.3-{branch}-build{build}
os: Visual Studio 2019
clone_folder: c:\projects\gtsam
platform: x64
configuration: Release
environment:
CTEST_OUTPUT_ON_FAILURE: 1
BOOST_ROOT: C:/Libraries/boost_1_71_0
build_script:
- cd c:\projects\gtsam\build
# As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just
# check that parts of GTSAM build correctly:
#- cmake --build .
- cmake --build . --config Release --target gtsam
- cmake --build . --config Release --target gtsam_unstable
- cmake --build . --config Release --target wrap
#- cmake --build . --target check
- cmake --build . --config Release --target check.base
- cmake --build . --config Release --target check.base_unstable
- cmake --build . --config Release --target check.linear
before_build:
- cd c:\projects\gtsam
- mkdir build
- cd build
# Disable examples to avoid AppVeyor timeout
- cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF


@ -20,6 +20,7 @@ install(FILES
GtsamPythonWrap.cmake
GtsamCythonWrap.cmake
GtsamTesting.cmake
GtsamPrinting.cmake
FindCython.cmake
FindNumPy.cmake
README.html

cmake/FindBoost.cmake

File diff suppressed because it is too large.


@ -115,7 +115,7 @@ IF(WIN32 AND MKL_ROOT_DIR)
IF (MKL_INCLUDE_DIR MATCHES "10.3")
SET(MKL_LIBS ${MKL_LIBS} libiomp5md)
ENDIF()
FOREACH (LIB ${MKL_LIBS})
FIND_LIBRARY(${LIB}_PATH ${LIB} PATHS ${MKL_LIB_SEARCHPATH} ENV LIBRARY_PATH)
IF(${LIB}_PATH)
@ -147,7 +147,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS
${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}
${MKL_ROOT_DIR}/lib/
)
# MKL on Mac OS doesn't ship with GNU thread versions, only Intel versions (see above)
IF(NOT APPLE)
FIND_LIBRARY(MKL_GNUTHREAD_LIBRARY
@ -231,6 +231,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS
FIND_LIBRARY(MKL_IOMP5_LIBRARY
iomp5
PATHS
${MKL_ROOT_DIR}/lib/intel64
${MKL_ROOT_DIR}/../lib/intel64
)
ELSE()
@ -254,7 +255,7 @@ ELSEIF(MKL_ROOT_DIR) # UNIX and macOS
ELSE()
SET(MKL_LIBRARIES ${MKL_LP_GNUTHREAD_LIBRARIES})
ENDIF()
MARK_AS_ADVANCED(MKL_CORE_LIBRARY MKL_LP_LIBRARY MKL_ILP_LIBRARY
MKL_SEQUENTIAL_LIBRARY MKL_INTELTHREAD_LIBRARY MKL_GNUTHREAD_LIBRARY)
ENDIF()
@ -266,4 +267,4 @@ find_package_handle_standard_args(MKL DEFAULT_MSG MKL_INCLUDE_DIR MKL_LIBRARIES)
# LINK_DIRECTORIES(${MKL_ROOT_DIR}/lib/${MKL_ARCH_DIR}) # hack
#endif()
MARK_AS_ADVANCED(MKL_INCLUDE_DIR MKL_LIBRARIES)


@ -1,3 +1,8 @@
# Set cmake policy to recognize the AppleClang compiler
# independently from the Clang compiler.
if(POLICY CMP0025)
cmake_policy(SET CMP0025 NEW)
endif()
# function: list_append_cache(var [new_values ...])
# Like "list(APPEND ...)" but working for CACHE variables.
@ -81,6 +86,11 @@ if(MSVC)
WINDOWS_LEAN_AND_MEAN
NOMINMAX
)
# Avoid literally hundreds to thousands of warnings:
list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC
/wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
)
endif()
# Other (non-preprocessor macros) compiler flags:
@ -94,7 +104,8 @@ if(MSVC)
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
else()
# Common to all configurations, next for each configuration:
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall CACHE STRING "(User editable) Private compiler flags for all configurations.")
# "-fPIC" is to ensure proper code generation for shared libraries
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall -fPIC CACHE STRING "(User editable) Private compiler flags for all configurations.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO -g -O3 CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE -O3 CACHE STRING "(User editable) Private compiler flags for Release configuration.")


@ -5,13 +5,17 @@ unset(PYTHON_EXECUTABLE CACHE)
unset(CYTHON_EXECUTABLE CACHE)
unset(PYTHON_INCLUDE_DIR CACHE)
unset(PYTHON_MAJOR_VERSION CACHE)
unset(PYTHON_LIBRARY CACHE)
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
find_package(PythonInterp REQUIRED)
find_package(PythonLibs REQUIRED)
else()
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
# Allow override from command line
if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY)
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
find_package(PythonInterp REQUIRED)
find_package(PythonLibs REQUIRED)
else()
find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
endif()
endif()
find_package(Cython 0.25.2 REQUIRED)
@ -37,9 +41,8 @@ execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
# Paths for generated files
get_filename_component(module_name "${interface_header}" NAME_WE)
set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
set(generated_files_path "${install_path}")
wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
endfunction()
function(set_up_required_cython_packages)
@ -83,6 +86,15 @@ endfunction()
# - output_dir: The output directory
function(build_cythonized_cpp target cpp_file output_lib_we output_dir)
add_library(${target} MODULE ${cpp_file})
if(WIN32)
# Use .pyd extension instead of .dll on Windows
set_target_properties(${target} PROPERTIES SUFFIX ".pyd")
# Add full path to the Python library
target_link_libraries(${target} ${PYTHON_LIBRARIES})
endif()
if(APPLE)
set(link_flags "-undefined dynamic_lookup")
endif()
@ -125,6 +137,10 @@ function(cythonize target pyx_file output_lib_we output_dir include_dirs libs in
target_link_libraries(${target} "${libs}")
endif()
add_dependencies(${target} ${target}_pyx2cpp)
if(TARGET ${python_install_target})
add_dependencies(${python_install_target} ${target})
endif()
endfunction()
# Internal function that wraps a library and compiles the wrapper
@ -137,9 +153,12 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
get_filename_component(module_name "${interface_header}" NAME_WE)
# Wrap module to Cython pyx
message(STATUS "Cython wrapper generating ${module_name}.pyx")
message(STATUS "Cython wrapper generating ${generated_files_path}/${module_name}.pyx")
set(generated_pyx "${generated_files_path}/${module_name}.pyx")
file(MAKE_DIRECTORY "${generated_files_path}")
if(NOT EXISTS ${generated_files_path})
file(MAKE_DIRECTORY "${generated_files_path}")
endif()
add_custom_command(
OUTPUT ${generated_pyx}
DEPENDS ${interface_header} wrap
@ -162,46 +181,6 @@ function(wrap_library_cython interface_header generated_files_path extra_imports
COMMAND cmake -E remove_directory ${generated_files_path})
endfunction()
# Internal function that installs a wrap toolbox
function(install_cython_wrapped_library interface_header generated_files_path install_path)
get_filename_component(module_name "${interface_header}" NAME_WE)
# NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
# here prevents creating the top-level module name directory in the destination.
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one
get_filename_component(location "${install_path}" PATH)
get_filename_component(name "${install_path}" NAME)
message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}"
if(GTSAM_BUILD_TYPE_POSTFIXES)
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${build_type}" build_type_upper)
if(${build_type_upper} STREQUAL "RELEASE")
set(build_type_tag "") # Don't create release mode tag on installed directory
else()
set(build_type_tag "${build_type}")
endif()
install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}"
CONFIGURATIONS "${build_type}"
PATTERN "build" EXCLUDE
PATTERN "CMakeFiles" EXCLUDE
PATTERN "Makefile" EXCLUDE
PATTERN "*.cmake" EXCLUDE
PATTERN "*.cpp" EXCLUDE
PATTERN "*.py" EXCLUDE)
endforeach()
else()
install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
PATTERN "build" EXCLUDE
PATTERN "CMakeFiles" EXCLUDE
PATTERN "Makefile" EXCLUDE
PATTERN "*.cmake" EXCLUDE
PATTERN "*.cpp" EXCLUDE
PATTERN "*.py" EXCLUDE)
endif()
endfunction()
# Helper function to install Cython scripts and handle multiple build types where the scripts
# should be installed to all build type toolboxes
#
@ -219,50 +198,7 @@ function(install_cython_scripts source_directory dest_directory patterns)
foreach(pattern ${patterns})
list(APPEND patterns_args PATTERN "${pattern}")
endforeach()
if(GTSAM_BUILD_TYPE_POSTFIXES)
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${build_type}" build_type_upper)
if(${build_type_upper} STREQUAL "RELEASE")
set(build_type_tag "") # Don't create release mode tag on installed directory
else()
set(build_type_tag "${build_type}")
endif()
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
get_filename_component(location "${dest_directory}" PATH)
get_filename_component(name "${dest_directory}" NAME)
install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
file(COPY "${source_directory}" DESTINATION "${dest_directory}"
FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
endforeach()
else()
install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
endif()
endfunction()
# Helper function to install specific files and handle multiple build types where the scripts
# should be installed to all build type toolboxes
#
# Arguments:
# source_files: The source files to be installed.
# dest_directory: The destination directory to install to.
function(install_cython_files source_files dest_directory)
if(GTSAM_BUILD_TYPE_POSTFIXES)
foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${build_type}" build_type_upper)
if(${build_type_upper} STREQUAL "RELEASE")
set(build_type_tag "") # Don't create release mode tag on installed directory
else()
set(build_type_tag "${build_type}")
endif()
# Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
get_filename_component(location "${dest_directory}" PATH)
get_filename_component(name "${dest_directory}" NAME)
install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
endforeach()
else()
install(FILES "${source_files}" DESTINATION "${dest_directory}")
endif()
endfunction()


@ -15,7 +15,7 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
get_filename_component(name "${ARGV1}" NAME_WE)
set(EXTRA_FILE "${name}.cmake")
configure_file(${ARGV1} "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" @ONLY)
install(FILES "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" DESTINATION "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}")
install(FILES "${PROJECT_BINARY_DIR}/${EXTRA_FILE}" DESTINATION "${DEF_INSTALL_CMAKE_DIR}")
else()
set(EXTRA_FILE "_does_not_exist_")
endif()


@ -33,7 +33,6 @@
// Here we will use Between factors for the relative motion described by odometry measurements.
// We will also use a Between Factor to encode the loop closure constraint
// Also, we will initialize the robot at the origin using a Prior factor.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
// When the factors are created, we will add them to a Factor Graph. As the factors we are using
@ -69,7 +68,7 @@ int main(int argc, char** argv) {
// 2a. Add a prior on the first pose, setting it to the origin
// A prior factor consists of a mean and a noise model (covariance matrix)
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.emplace_shared<PriorFactor<Pose2> >(1, Pose2(0, 0, 0), priorNoise);
graph.addPrior(1, Pose2(0, 0, 0), priorNoise);
// For simplicity, we will use the same noise model for odometry and loop closures
noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
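
A rough Python analogue of this example, as a hedged sketch against the Cython toolbox (an explicit PriorFactorPose2 is used here in place of the new C++ addPrior helper; poses and noise values mirror the C++ code only loosely):

```python
import numpy as np

import gtsam

graph = gtsam.NonlinearFactorGraph()

prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
odometry_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))

# Prior on the first pose, at the origin
graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), prior_noise))

# Odometry factors between consecutive poses
graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), odometry_noise))
graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, np.pi / 2), odometry_noise))

# Deliberately inaccurate initial estimate
initial = gtsam.Values()
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
initial.insert(3, gtsam.Pose2(4.1, 0.1, np.pi / 2))

result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()
pose3_estimate = result.atPose2(3)
print(pose3_estimate.x(), pose3_estimate.y(), pose3_estimate.theta())
```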


@ -3,11 +3,32 @@ include(GtsamCythonWrap)
# Create the cython toolbox for the gtsam library
if (GTSAM_INSTALL_CYTHON_TOOLBOX)
# Add the new make target command
set(python_install_target python-install)
add_custom_target(${python_install_target}
COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_CYTHON_INSTALL_PATH}/setup.py install
WORKING_DIRECTORY ${GTSAM_CYTHON_INSTALL_PATH})
# build and include the eigency version of eigency
add_subdirectory(gtsam_eigency)
include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
# wrap gtsam
# Fix for error "C1128: number of sections exceeded object file format limit"
if(MSVC)
add_compile_options(/bigobj)
endif()
# First set up all the package related files.
# This also ensures the below wrap operations work correctly.
set(CYTHON_INSTALL_REQUIREMENTS_FILE "${PROJECT_SOURCE_DIR}/cython/requirements.txt")
# Install the custom-generated __init__.py
# This makes the cython (sub-)directories into python packages, so gtsam can be found while wrapping gtsam_unstable
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${GTSAM_CYTHON_INSTALL_PATH}/setup.py)
# Wrap gtsam
add_custom_target(gtsam_header DEPENDS "../gtsam.h")
wrap_and_install_library_cython("../gtsam.h" # interface_header
"" # extra imports
@ -15,8 +36,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
gtsam # library to link with
"wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
)
add_dependencies(${python_install_target} gtsam gtsam_header)
# wrap gtsam_unstable
# Wrap gtsam_unstable
if(GTSAM_BUILD_UNSTABLE)
add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
@ -25,17 +47,9 @@ if (GTSAM_INSTALL_CYTHON_TOOLBOX)
gtsam_unstable # library to link with
"gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
)
add_dependencies(${python_install_target} gtsam_unstable gtsam_unstable_header)
endif()
file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
# Install the custom-generated __init__.py
# This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
# install scripts and tests
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")


@ -1,43 +1,51 @@
# Python Wrapper
This is the Cython/Python wrapper around the GTSAM C++ library.
This is the Python wrapper around the GTSAM C++ library. We use Cython to generate the bindings to the underlying C++ code.
## Requirements
- If you want to build the GTSAM python library for a specific python version (e.g. 3.6),
use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment),
then the environment should be active while building GTSAM.
- This wrapper needs `Cython(>=0.25.2)`, `backports_abc(>=0.5)`, and `numpy(>=1.11.0)`. These can be installed as follows:
```bash
pip install -r <gtsam_folder>/cython/requirements.txt
```
- For compatibility with GTSAM's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
named `gtsam_eigency`, to interface between C++'s Eigen and Python's numpy.
## Install
- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
- Run cmake with the `GTSAM_INSTALL_CYTHON_TOOLBOX` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory defined by `GTSAM_CYTHON_INSTALL_PATH`, which is by default `<PROJECT_BINARY_DIR>/cython` in Release mode and `<PROJECT_BINARY_DIR>/cython<CMAKE_BUILD_TYPE>` for other modes.
```bash
pip install -r <gtsam_folder>/cython/requirements.txt
```
- Build GTSAM and the wrapper with `make`.
- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
- To install, simply run `make python-install`.
- The same command can be used to install into a virtual environment if it is active.
- **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory.
- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
by default: `<your CMAKE_INSTALL_PREFIX>/cython`
- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
```bash
export PYTHONPATH=$PYTHONPATH:<GTSAM_CYTHON_INSTALL_PATH>
```
- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
- (the same command can be used to install into a virtual environment if it is active)
- note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
- if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly.
## Unit Tests
The Cython toolbox also has a small set of unit tests located in the
test directory. To run them:
```bash
cd <your GTSAM_CYTHON_INSTALL_PATH>
python -m unittest discover
```
```bash
cd <GTSAM_CYTHON_INSTALL_PATH>
python -m unittest discover
```
## Utils
TODO
## Examples
TODO
## Writing Your Own Scripts
@ -46,79 +54,63 @@ See the tests for examples.
### Some Important Notes:
- Vector/Matrix:
+ GTSAM expects double-precision floating point vectors and matrices.
Hence, you should pass numpy matrices with dtype=float, or 'float64'.
+ Also, GTSAM expects *column-major* matrices, unlike the default storage
scheme in numpy. Hence, you should pass column-major matrices to gtsam using
- GTSAM expects double-precision floating point vectors and matrices.
Hence, you should pass numpy matrices with `dtype=float`, or `float64`.
- Also, GTSAM expects _column-major_ matrices, unlike the default storage
scheme in numpy. Hence, you should pass column-major matrices to GTSAM using
the flag order='F'. And you always get column-major matrices back.
For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
+ Passing row-major matrices of different dtype, e.g. 'int', will also work
For more details, see [this link](https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed).
- Passing row-major matrices of different dtype, e.g. `int`, will also work
as the wrapper converts them to column-major and dtype float for you,
using numpy.array.astype(float, order='F', copy=False).
However, this will result in a copy if your matrix is not in the expected type
and storage order.
- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>_ in Python.
Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
- Inner namespace: Classes in inner namespace will be prefixed by <innerNamespace>\_ in Python.
Examples: `noiseModel_Gaussian`, `noiseModel_mEstimator_Tukey`
- Casting from a base class to a derived class must be done explicitly.
Examples:
```Python
noiseBase = factor.noiseModel()
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
```
## Wrapping Your Own Project That Uses GTSAM
Examples:
- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
+ so that it can find gtsam Cython header: gtsam/gtsam.pxd
```python
noiseBase = factor.noiseModel()
noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
```
- In your CMakeList.txt
```cmake
find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
## Wrapping Custom GTSAM-based Project
# Wrap
include(GtsamCythonWrap)
include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
wrap_and_install_library_cython("your_project_interface.h"
"from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
"your_install_path"
"libraries_to_link_with_the_cython_module"
"dependencies_which_need_to_be_built_before_the_wrapper"
)
#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
```
Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python).
## KNOWN ISSUES
- Doesn't work with python3 installed from homebrew
- size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
- Guess: 64 vs 32b? distutils compiler flags?
- Bug with Cython 0.24: instantiated factor classes return FastVector<size_t> for keys(), which can't be cast to FastVector<Key>
- Upgrading to 0.25 solves the problem
- Need default constructor and default copy constructor for almost every class... :(
- support these constructors by default and declare "delete" for special classes?
### TODO
- [ ] allow duplication of parent's functions in child classes. Not allowed for now due to conflicts in Cython.
- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in GTSAM?)
- [ ] inner namespaces ==> inner packages?
- [ ] Wrap fixed-size Matrices/Vectors?
### Completed/Cancelled:
- [x] Fix Python tests: don't use " import <package> * ": Bad style!!! (18-03-17 19:50)
- [x] Fix Python tests: don't use " import <package> \* ": Bad style!!! (18-03-17 19:50)
- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
- [x] Wrap unstable @done (18-03-17 15:30)
- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
- [x] 06-03-17: manage to remove the requirements for default and copy constructors
- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector<Key>, to FastVector<size_t>.
- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: `gtsam::JointMarginal __pyx_t_1;` Users don't have to wrap this constructor, however.
- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
- [x] CMake install script @done (25-11-16 02:30)
@ -132,7 +124,7 @@ wrap_and_install_library_cython("your_project_interface.h"
- [x] Casting from parent and grandparents @done (16-11-16 17:00)
- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
- [x] Support "print obj" @done (16-11-16 17:00)
- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
- [x] Cython: Key and size_t: traits<size_t> doesn't exist @done (16-09-12 18:34)
- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
- [x] [Nice to have] parse typedef @done (16-09-13 17:19)
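
Tying together the Vector/Matrix, inner-namespace, and casting notes above, a small sketch (assuming the toolbox is installed; the identifiers follow the naming conventions this README describes):

```python
import numpy as np

import gtsam

# GTSAM expects float64, column-major ('F') matrices; other dtypes/orders are
# converted and copied, equivalent to a.astype(float, order='F', copy=False).
cov = np.diag([0.3, 0.3, 0.1]).astype(float, order='F')

# Inner namespaces are flattened with an underscore prefix:
# gtsam::noiseModel::Gaussian -> noiseModel_Gaussian
model = gtsam.noiseModel_Gaussian.Covariance(cov)

# Casting from a base class to a derived class is explicit:
factor = gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), model)
base = factor.noiseModel()
gaussian = gtsam.dynamic_cast_noiseModel_Gaussian_noiseModel_Base(base)
print(type(gaussian))
```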


@ -0,0 +1,56 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Simple robot motion example, with a prior and one GPS measurement
Author: Mandy Xie
"""
# pylint: disable=invalid-name, E1101
from __future__ import print_function
import numpy as np
import gtsam
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot
# ENU Origin is where the plane was in hold next to runway
lat0 = 33.86998
lon0 = -84.30626
h0 = 274
# Create noise models
GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25)
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
# Add a prior on the first point, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
priorMean = gtsam.Pose3() # prior at origin
graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
# Add GPS factors
gps = gtsam.Point3(lat0, lon0, h0)
graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
print("\nFactor Graph:\n{}".format(graph))
# Create the data structure to hold the initial estimate of the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose3())
print("\nInitial Estimate:\n{}".format(initial))
# optimize using Levenberg-Marquardt optimization
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))


@ -5,32 +5,27 @@ All Rights Reserved
See LICENSE for the license information
A script validating the ImuFactor inference.
A script validating and demonstrating the ImuFactor inference.
Author: Frank Dellaert, Varun Agrawal
"""
from __future__ import print_function
import math
import gtsam
import matplotlib.pyplot as plt
import numpy as np
from gtsam import symbol_shorthand_B as B
from gtsam import symbol_shorthand_V as V
from gtsam import symbol_shorthand_X as X
from gtsam.utils.plot import plot_pose3
from mpl_toolkits.mplot3d import Axes3D
import gtsam
from gtsam.utils.plot import plot_pose3
from PreintegrationExample import POSES_FIG, PreintegrationExample
BIAS_KEY = int(gtsam.symbol(ord('b'), 0))
def X(key):
"""Create symbol for pose key."""
return gtsam.symbol(ord('x'), key)
def V(key):
"""Create symbol for velocity key."""
return gtsam.symbol(ord('v'), key)
BIAS_KEY = B(0)
np.set_printoptions(precision=3, suppress=True)
@ -76,8 +71,14 @@ class ImuFactorExample(PreintegrationExample):
initial.insert(BIAS_KEY, self.actualBias)
for i in range(num_poses):
state_i = self.scenario.navState(float(i))
initial.insert(X(i), state_i.pose())
initial.insert(V(i), state_i.velocity())
poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
pose = state_i.pose().compose(poseNoise)
velocity = state_i.velocity() + np.random.randn(3)*0.1
initial.insert(X(i), pose)
initial.insert(V(i), velocity)
# simulate the loop
i = 0 # state index
@ -88,6 +89,12 @@ class ImuFactorExample(PreintegrationExample):
measuredAcc = self.runner.measuredSpecificForce(t)
pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
actual_state_i = gtsam.NavState(
actual_state_i.pose().compose(poseNoise),
actual_state_i.velocity() + np.random.randn(3)*0.1)
# Plot IMU many times
if k % 10 == 0:
self.plotImu(t, measuredOmega, measuredAcc)
@ -133,6 +140,9 @@ class ImuFactorExample(PreintegrationExample):
pose_i = result.atPose3(X(i))
plot_pose3(POSES_FIG, pose_i, 0.1)
i += 1
gtsam.utils.plot.set_axes_equal(POSES_FIG)
print(result.atimuBias_ConstantBias(BIAS_KEY))
plt.ioff()

View File

@ -8,22 +8,19 @@ from __future__ import print_function
import math
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
import gtsam
import gtsam.utils.plot as gtsam_plot
def X(key):
"""Create symbol for pose key."""
return gtsam.symbol(ord('x'), key)
def V(key):
"""Create symbol for velocity key."""
return gtsam.symbol(ord('v'), key)
import matplotlib.pyplot as plt
import numpy as np
from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2,
ConstantTwistScenario, ImuFactor, NonlinearFactorGraph,
PinholeCameraCal3_S2, Point3, Pose3,
PriorFactorConstantBias, PriorFactorPose3,
PriorFactorVector, Rot3, Values)
from gtsam import symbol_shorthand_B as B
from gtsam import symbol_shorthand_V as V
from gtsam import symbol_shorthand_X as X
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
def vector3(x, y, z):
@ -69,8 +66,8 @@ PARAMS.setUse2ndOrderCoriolis(False)
PARAMS.setOmegaCoriolis(vector3(0, 0, 0))
BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1)
DELTA = gtsam.Pose3(gtsam.Rot3.Rodrigues(0, 0, 0),
gtsam.Point3(0.05, -0.10, 0.20))
DELTA = Pose3(Rot3.Rodrigues(0, 0, 0),
Point3(0.05, -0.10, 0.20))
def IMU_example():
@ -78,10 +75,10 @@ def IMU_example():
# Start with a camera on x-axis looking at origin
radius = 30
up = gtsam.Point3(0, 0, 1)
target = gtsam.Point3(0, 0, 0)
position = gtsam.Point3(radius, 0, 0)
camera = gtsam.SimpleCamera.Lookat(position, target, up, gtsam.Cal3_S2())
up = Point3(0, 0, 1)
target = Point3(0, 0, 0)
position = Point3(radius, 0, 0)
camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2())
pose_0 = camera.pose()
# Create the set of ground-truth landmarks and poses
@ -90,37 +87,37 @@ def IMU_example():
angular_velocity_vector = vector3(0, -angular_velocity, 0)
linear_velocity_vector = vector3(radius * angular_velocity, 0, 0)
scenario = gtsam.ConstantTwistScenario(
scenario = ConstantTwistScenario(
angular_velocity_vector, linear_velocity_vector, pose_0)
# Create a factor graph
newgraph = gtsam.NonlinearFactorGraph()
newgraph = NonlinearFactorGraph()
# Create (incremental) ISAM2 solver
isam = gtsam.ISAM2()
isam = ISAM2()
# Create the initial estimate to the solution
# Intentionally initialize the variables off from the ground truth
initialEstimate = gtsam.Values()
initialEstimate = Values()
# Add a prior on pose x0. This indirectly specifies where the origin is.
# 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
noise = gtsam.noiseModel_Diagonal.Sigmas(
np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
newgraph.push_back(gtsam.PriorFactorPose3(X(0), pose_0, noise))
np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3]))
newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise))
# Add imu priors
biasKey = gtsam.symbol(ord('b'), 0)
biasKey = B(0)
biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
biasprior = gtsam.PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(),
biasnoise)
biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(),
biasnoise)
newgraph.push_back(biasprior)
initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias())
velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
# Calculate with correct initial velocity
n_velocity = vector3(0, angular_velocity * radius, 0)
velprior = gtsam.PriorFactorVector(V(0), n_velocity, velnoise)
velprior = PriorFactorVector(V(0), n_velocity, velnoise)
newgraph.push_back(velprior)
initialEstimate.insert(V(0), n_velocity)
@ -141,7 +138,7 @@ def IMU_example():
# Add Bias variables periodically
if i % 5 == 0:
biasKey += 1
factor = gtsam.BetweenFactorConstantBias(
factor = BetweenFactorConstantBias(
biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE)
newgraph.add(factor)
initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias())
@ -154,8 +151,7 @@ def IMU_example():
accum.integrateMeasurement(measuredAcc, measuredOmega, delta_t)
# Add Imu Factor
imufac = gtsam.ImuFactor(
X(i - 1), V(i - 1), X(i), V(i), biasKey, accum)
imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum)
newgraph.add(imufac)
# insert new velocity, which is wrong
@ -168,7 +164,7 @@ def IMU_example():
ISAM2_plot(result)
# reset
newgraph = gtsam.NonlinearFactorGraph()
newgraph = NonlinearFactorGraph()
initialEstimate.clear()

View File

@ -13,9 +13,10 @@ Author: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
from __future__ import print_function
import numpy as np
import gtsam
import numpy as np
from gtsam import symbol_shorthand_L as L
from gtsam import symbol_shorthand_X as X
# Create noise models
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
@ -26,11 +27,11 @@ MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2]))
graph = gtsam.NonlinearFactorGraph()
# Create the keys corresponding to unknown variables in the factor graph
X1 = gtsam.symbol(ord('x'), 1)
X2 = gtsam.symbol(ord('x'), 2)
X3 = gtsam.symbol(ord('x'), 3)
L1 = gtsam.symbol(ord('l'), 4)
L2 = gtsam.symbol(ord('l'), 5)
X1 = X(1)
X2 = X(2)
X3 = X(3)
L1 = L(4)
L2 = L(5)
# Add a prior on pose X1 at the origin. A prior factor consists of a mean and a noise model
graph.add(gtsam.PriorFactorPose2(X1, gtsam.Pose2(0.0, 0.0, 0.0), PRIOR_NOISE))

View File

@ -53,16 +53,15 @@ graph, initial = gtsam.readG2o(g2oFile, is3D)
assert args.kernel == "none", "Supplied kernel type is not yet implemented"
# Add prior on the pose having index (key) = 0
graphWithPrior = graph
priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8))
graphWithPrior.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
# ... and optimize
result = optimizer.optimize()
@ -83,7 +82,7 @@ else:
print ("Done!")
if args.plot:
resultPoses = gtsam.extractPose2(result)
resultPoses = gtsam.utilities_extractPose2(result)
for i in range(resultPoses.shape[0]):
plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
plt.show()

View File

@ -43,18 +43,17 @@ priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6,
1e-4, 1e-4, 1e-4))
print("Adding prior to g2o file ")
graphWithPrior = graph
firstKey = initial.keys().at(0)
graphWithPrior.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel))
params = gtsam.GaussNewtonParams()
params.setVerbosity("Termination") # this will show info about stopping conds
optimizer = gtsam.GaussNewtonOptimizer(graphWithPrior, initial, params)
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
result = optimizer.optimize()
print("Optimization complete")
print("initial error = ", graphWithPrior.error(initial))
print("final error = ", graphWithPrior.error(result))
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))
if args.output is None:
print("Final Result:\n{}".format(result))
@ -66,7 +65,7 @@ else:
print ("Done!")
if args.plot:
resultPoses = gtsam.allPose3s(result)
resultPoses = gtsam.utilities_allPose3s(result)
for i in range(resultPoses.size()):
plot.plot_pose3(1, resultPoses.atPose3(i))
plt.show()

View File

@ -1,7 +1,7 @@
These examples are almost identical to the old handwritten python wrapper
examples. However, there are just some slight name changes, for example
`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc...
Also, annoyingly, instead of `gtsam.Symbol('b', 0)` we now need to say `gtsam.symbol(ord('b'), 0))`
Also, instead of `gtsam.Symbol('b', 0)` we can now simply write `gtsam.symbol_shorthand_B(0)`, or just `B(0)` if we alias the import in Python; see the sketch below.
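For illustration, here is a minimal sketch of the two equivalent ways to build a key under the Cython wrapper (`symbol_shorthand_B` is the name used by the examples in this changeset):

```python
import gtsam
from gtsam import symbol_shorthand_B as B  # Python-side alias for brevity

# Old spelling: pass the character code and the index explicitly
old_key = gtsam.symbol(ord('b'), 0)

# New spelling: shorthand generator for symbols with character 'b'
new_key = B(0)

# Both produce the same integer key
assert int(old_key) == int(new_key)
```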
# Porting Progress

View File

@ -11,17 +11,17 @@ A structure-from-motion problem on a simulated dataset
from __future__ import print_function
import gtsam
import matplotlib.pyplot as plt
import numpy as np
from gtsam import symbol_shorthand_L as L
from gtsam import symbol_shorthand_X as X
from gtsam.examples import SFMdata
from gtsam.gtsam import (Cal3_S2, DoglegOptimizer,
GenericProjectionFactorCal3_S2, NonlinearFactorGraph,
Point3, Pose3, PriorFactorPoint3, PriorFactorPose3,
Rot3, SimpleCamera, Values)
def symbol(name: str, index: int) -> int:
""" helper for creating a symbol without explicitly casting 'name' from str to int """
return gtsam.symbol(ord(name), index)
GenericProjectionFactorCal3_S2, Marginals,
NonlinearFactorGraph, PinholeCameraCal3_S2, Point3,
Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3,
SimpleCamera, Values)
from gtsam.utils import plot
def main():
@ -70,23 +70,23 @@ def main():
# Add a prior on pose x1. This indirectly specifies where the origin is.
# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise)
factor = PriorFactorPose3(X(0), poses[0], pose_noise)
graph.push_back(factor)
# Simulated measurements from each camera pose, adding them to the factor graph
for i, pose in enumerate(poses):
camera = SimpleCamera(pose, K)
camera = PinholeCameraCal3_S2(pose, K)
for j, point in enumerate(points):
measurement = camera.project(point)
factor = GenericProjectionFactorCal3_S2(
measurement, measurement_noise, symbol('x', i), symbol('l', j), K)
measurement, measurement_noise, X(i), L(j), K)
graph.push_back(factor)
# Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained
# Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance
# between the first camera and the first landmark. All other landmark positions are interpreted using this scale.
point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise)
factor = PriorFactorPoint3(L(0), points[0], point_noise)
graph.push_back(factor)
graph.print_('Factor Graph:\n')
@ -94,13 +94,11 @@ def main():
# Intentionally initialize the variables off from the ground truth
initial_estimate = Values()
for i, pose in enumerate(poses):
r = Rot3.Rodrigues(-0.1, 0.2, 0.25)
t = Point3(0.05, -0.10, 0.20)
transformed_pose = pose.compose(Pose3(r, t))
initial_estimate.insert(symbol('x', i), transformed_pose)
transformed_pose = pose.retract(0.1*np.random.randn(6,1))
initial_estimate.insert(X(i), transformed_pose)
for j, point in enumerate(points):
transformed_point = Point3(point.vector() + np.array([-0.25, 0.20, 0.15]))
initial_estimate.insert(symbol('l', j), transformed_point)
transformed_point = Point3(point.vector() + 0.1*np.random.randn(3))
initial_estimate.insert(L(j), transformed_point)
initial_estimate.print_('Initial Estimates:\n')
# Optimize the graph and print results
@ -113,6 +111,11 @@ def main():
print('initial error = {}'.format(graph.error(initial_estimate)))
print('final error = {}'.format(graph.error(result)))
marginals = Marginals(graph, result)
plot.plot_3d_points(1, result, marginals=marginals)
plot.plot_trajectory(1, result, marginals=marginals, scale=8)
plot.set_axes_equal(1)
plt.show()
if __name__ == '__main__':
main()

View File

@ -25,14 +25,15 @@ def createPoints():
def createPoses(K):
# Create the set of ground-truth poses
radius = 30.0
radius = 40.0
height = 10.0
angles = np.linspace(0, 2*np.pi, 8, endpoint=False)
up = gtsam.Point3(0, 0, 1)
target = gtsam.Point3(0, 0, 0)
poses = []
for theta in angles:
position = gtsam.Point3(radius*np.cos(theta),
radius*np.sin(theta), 0.0)
radius*np.sin(theta), height)
camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K)
poses.append(camera.pose())
return poses

View File

@ -10,8 +10,9 @@ This example will perform a relatively trivial optimization on
a single variable with a single factor.
"""
import numpy as np
import gtsam
import numpy as np
from gtsam import symbol_shorthand_X as X
def main():
@ -33,7 +34,7 @@ def main():
prior = gtsam.Rot2.fromAngle(np.deg2rad(30))
prior.print_('goal angle')
model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1))
key = gtsam.symbol(ord('x'), 1)
key = X(1)
factor = gtsam.PriorFactorRot2(key, prior, model)
"""

View File

@ -13,23 +13,14 @@ Author: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
import gtsam
import gtsam.utils.plot as gtsam_plot
import matplotlib.pyplot as plt
import numpy as np
from gtsam import symbol_shorthand_L as L
from gtsam import symbol_shorthand_X as X
from gtsam.examples import SFMdata
def X(i):
"""Create key for pose i."""
return int(gtsam.symbol(ord('x'), i))
def L(j):
"""Create key for landmark j."""
return int(gtsam.symbol(ord('l'), j))
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611
def visual_ISAM2_plot(result):
@ -120,7 +111,7 @@ def visual_ISAM2_example():
if i == 0:
# Add a prior on pose x0
pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array(
[0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
[0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise))
# Add a prior on landmark l0

View File

@ -18,12 +18,9 @@ from gtsam.examples import SFMdata
from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2,
NonlinearFactorGraph, NonlinearISAM, Point3, Pose3,
PriorFactorPoint3, PriorFactorPose3, Rot3,
SimpleCamera, Values)
def symbol(name: str, index: int) -> int:
""" helper for creating a symbol without explicitly casting 'name' from str to int """
return gtsam.symbol(ord(name), index)
PinholeCameraCal3_S2, Values)
from gtsam import symbol_shorthand_L as L
from gtsam import symbol_shorthand_X as X
def main():
@ -54,11 +51,11 @@ def main():
# Loop over the different poses, adding the observations to iSAM incrementally
for i, pose in enumerate(poses):
camera = SimpleCamera(pose, K)
camera = PinholeCameraCal3_S2(pose, K)
# Add factors for each landmark observation
for j, point in enumerate(points):
measurement = camera.project(point)
factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, symbol('x', i), symbol('l', j), K)
factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K)
graph.push_back(factor)
# Intentionally initialize the variables off from the ground truth
@ -66,7 +63,7 @@ def main():
initial_xi = pose.compose(noise)
# Add an initial guess for the current pose
initial_estimate.insert(symbol('x', i), initial_xi)
initial_estimate.insert(X(i), initial_xi)
# If this is the first iteration, add a prior on the first pose to set the coordinate frame
# and a prior on the first landmark to set the scale
@ -75,12 +72,12 @@ def main():
if i == 0:
# Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z
pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
factor = PriorFactorPose3(symbol('x', 0), poses[0], pose_noise)
factor = PriorFactorPose3(X(0), poses[0], pose_noise)
graph.push_back(factor)
# Add a prior on landmark l0
point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
factor = PriorFactorPoint3(symbol('l', 0), points[0], point_noise)
factor = PriorFactorPoint3(L(0), points[0], point_noise)
graph.push_back(factor)
# Add initial guesses to all observed landmarks
@ -88,7 +85,7 @@ def main():
for j, point in enumerate(points):
# Intentionally initialize the variables off from the ground truth
initial_lj = points[j].vector() + noise
initial_estimate.insert(symbol('l', j), Point3(initial_lj))
initial_estimate.insert(L(j), Point3(initial_lj))
else:
# Update iSAM with the new factors
isam.update(graph, initial_estimate)

View File

@ -0,0 +1,56 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
FrobeniusFactor unit tests.
Author: Frank Dellaert
"""
# pylint: disable=no-name-in-module, import-error, invalid-name
import unittest
import numpy as np
from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
FrobeniusWormholeFactor, SOn)
id = SO4()
v1 = np.array([0, 0, 0, 0.1, 0, 0])
Q1 = SO4.Expmap(v1)
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
Q2 = SO4.Expmap(v2)
class TestFrobeniusFactorSO4(unittest.TestCase):
"""Test FrobeniusFactor factors."""
def test_frobenius_factor(self):
"""Test creation of a factor that calculates the Frobenius norm."""
factor = FrobeniusFactorSO4(1, 2)
actual = factor.evaluateError(Q1, Q2)
expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,))
np.testing.assert_array_equal(actual, expected)
def test_frobenius_between_factor(self):
"""Test creation of a Frobenius BetweenFactor."""
factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2))
actual = factor.evaluateError(Q1, Q2)
expected = np.zeros((16,))
np.testing.assert_array_almost_equal(actual, expected)
def test_frobenius_wormhole_factor(self):
"""Test creation of a factor that calculates Shonan error."""
R1 = SO3.Expmap(v1[3:])
R2 = SO3.Expmap(v2[3:])
factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
I4 = SOn(4)
Q1 = I4.retract(v1)
Q2 = I4.retract(v2)
actual = factor.evaluateError(Q1, Q2)
expected = np.zeros((12,))
np.testing.assert_array_almost_equal(actual, expected, decimal=4)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,92 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Unit tests for Linear Factor Graphs.
Author: Frank Dellaert & Gerry Chen
"""
# pylint: disable=invalid-name, no-name-in-module, no-member
from __future__ import print_function
import unittest
import gtsam
import numpy as np
from gtsam import symbol_shorthand_X as X
from gtsam.utils.test_case import GtsamTestCase
def create_graph():
"""Create a basic linear factor graph for testing"""
graph = gtsam.GaussianFactorGraph()
x0 = X(0)
x1 = X(1)
x2 = X(2)
BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1))
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1))
graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE)
graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE)
graph.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE)
return graph, (x0, x1, x2)
class TestGaussianFactorGraph(GtsamTestCase):
"""Tests for Gaussian Factor Graphs."""
def test_fg(self):
"""Test solving a linear factor graph"""
graph, X = create_graph()
result = graph.optimize()
EXPECTEDX = [0, 1, 3]
# check solutions
self.assertAlmostEqual(EXPECTEDX[0], result.at(X[0]), delta=1e-8)
self.assertAlmostEqual(EXPECTEDX[1], result.at(X[1]), delta=1e-8)
self.assertAlmostEqual(EXPECTEDX[2], result.at(X[2]), delta=1e-8)
def test_convertNonlinear(self):
"""Test converting a linear factor graph to a nonlinear one"""
graph, X = create_graph()
EXPECTEDM = [1, 2, 3]
# create nonlinear factor graph for marginalization
nfg = gtsam.LinearContainerFactor.ConvertLinearGraph(graph)
optimizer = gtsam.LevenbergMarquardtOptimizer(nfg, gtsam.Values())
nlresult = optimizer.optimizeSafely()
# marginalize
marginals = gtsam.Marginals(nfg, nlresult)
m = [marginals.marginalCovariance(x) for x in X]
# check linear marginalizations
self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
def test_linearMarginalization(self):
"""Marginalize a linear factor graph"""
graph, X = create_graph()
result = graph.optimize()
EXPECTEDM = [1, 2, 3]
# linear factor graph marginalize
marginals = gtsam.Marginals(graph, result)
m = [marginals.marginalCovariance(x) for x in X]
# check linear marginalizations
self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
if __name__ == '__main__':
unittest.main()

View File

@ -0,0 +1,80 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
KarcherMeanFactor unit tests.
Author: Frank Dellaert
"""
# pylint: disable=invalid-name, no-name-in-module, no-member
import unittest
import gtsam
import numpy as np
from gtsam.utils.test_case import GtsamTestCase
KEY = 0
MODEL = gtsam.noiseModel_Unit.Create(3)
def find_Karcher_mean_Rot3(rotations):
"""Find the Karcher mean of given values."""
# Cost function C(R) = \sum PriorFactor(R_i)::error(R)
# No closed form solution.
graph = gtsam.NonlinearFactorGraph()
for R in rotations:
graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
initial = gtsam.Values()
initial.insert(KEY, gtsam.Rot3())
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
return result.atRot3(KEY)
# Rot3 version
R = gtsam.Rot3.Expmap(np.array([0.1, 0, 0]))
class TestKarcherMean(GtsamTestCase):
def test_find(self):
# Check that optimizing for Karcher mean (which minimizes Between distance)
# gets correct result.
rotations = {R, R.inverse()}
expected = gtsam.Rot3()
actual = find_Karcher_mean_Rot3(rotations)
self.gtsamAssertEquals(expected, actual)
def test_factor(self):
"""Check that the InnerConstraint factor leaves the mean unchanged."""
# Make a graph with two variables, one between, and one InnerConstraint
# The optimal result should satisfy the between, while moving the other
# variable to make the mean the same as before.
# Mean of R and R' is identity. Let's make a BetweenFactor making R12 =
# R*R*R, i.e. geodesic length is 3 rather than 2.
graph = gtsam.NonlinearFactorGraph()
R12 = R.compose(R.compose(R))
graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL))
keys = gtsam.KeyVector()
keys.push_back(1)
keys.push_back(2)
graph.add(gtsam.KarcherMeanFactorRot3(keys))
initial = gtsam.Values()
initial.insert(1, R.inverse())
initial.insert(2, R)
expected = find_Karcher_mean_Rot3([R, R.inverse()])
result = gtsam.GaussNewtonOptimizer(graph, initial).optimize()
actual = find_Karcher_mean_Rot3(
[result.atRot3(1), result.atRot3(2)])
self.gtsamAssertEquals(expected, actual)
self.gtsamAssertEquals(
R12, result.atRot3(1).between(result.atRot3(2)))
if __name__ == "__main__":
unittest.main()

View File

@ -17,7 +17,8 @@ import unittest
import gtsam
from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer,
GaussNewtonParams, LevenbergMarquardtOptimizer,
LevenbergMarquardtParams, NonlinearFactorGraph, Ordering,
LevenbergMarquardtParams, PCGSolverParameters,
DummyPreconditionerParameters, NonlinearFactorGraph, Ordering,
Point2, PriorFactorPoint2, Values)
from gtsam.utils.test_case import GtsamTestCase
@ -61,6 +62,16 @@ class TestScenario(GtsamTestCase):
fg, initial_values, lmParams).optimize()
self.assertAlmostEqual(0, fg.error(actual2))
# Levenberg-Marquardt
lmParams = LevenbergMarquardtParams.CeresDefaults()
lmParams.setLinearSolverType("ITERATIVE")
cgParams = PCGSolverParameters()
cgParams.setPreconditionerParams(DummyPreconditionerParameters())
lmParams.setIterativeParams(cgParams)
actual2 = LevenbergMarquardtOptimizer(
fg, initial_values, lmParams).optimize()
self.assertAlmostEqual(0, fg.error(actual2))
# Dogleg
dlParams = DoglegParams()
dlParams.setOrdering(ordering)

View File

@ -6,8 +6,9 @@ All Rights Reserved
See LICENSE for the license information
Pose3 unit tests.
Author: Frank Dellaert & Duy Nguyen Ta (Python)
Author: Frank Dellaert, Duy Nguyen Ta
"""
# pylint: disable=no-name-in-module
import math
import unittest

View File

@ -35,5 +35,27 @@ class TestPriorFactor(GtsamTestCase):
values.insert(key, priorVector)
self.assertEqual(factor.error(values), 0)
def test_AddPrior(self):
"""
Test adding prior factors directly to factor graph via the .addPrior method.
"""
# define factor graph
graph = gtsam.NonlinearFactorGraph()
# define and add Pose3 prior
key = 5
priorPose3 = gtsam.Pose3()
model = gtsam.noiseModel_Unit.Create(6)
graph.addPriorPose3(key, priorPose3, model)
self.assertEqual(graph.size(), 1)
# define and add Vector prior
key = 3
priorVector = np.array([0., 0., 0.])
model = gtsam.noiseModel_Unit.Create(3)
graph.addPriorVector(key, priorVector, model)
self.assertEqual(graph.size(), 2)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,59 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
SO4 unit tests.
Author: Frank Dellaert
"""
# pylint: disable=no-name-in-module, import-error
import unittest
import numpy as np
from gtsam import SO4
I4 = SO4()
v1 = np.array([0, 0, 0, .1, 0, 0])
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
Q1 = SO4.Expmap(v1)
Q2 = SO4.Expmap(v2)
class TestSO4(unittest.TestCase):
"""Test selected SO4 methods."""
def test_constructor(self):
"""Construct from matrix."""
matrix = np.eye(4)
so4 = SO4(matrix)
self.assertIsInstance(so4, SO4)
def test_retract(self):
"""Test retraction to manifold."""
v = np.zeros((6,), np.float)
actual = I4.retract(v)
self.assertTrue(actual.equals(I4, 1e-9))
def test_local(self):
"""Check localCoordinates for trivial case."""
v0 = np.zeros((6,), np.float)
actual = I4.localCoordinates(I4)
np.testing.assert_array_almost_equal(actual, v0)
def test_compose(self):
"""Check compose works in subgroup."""
expected = SO4.Expmap(2*v1)
actual = Q1.compose(Q1)
self.assertTrue(actual.equals(expected, 1e-9))
def test_vec(self):
"""Check wrapping of vec."""
expected = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1])
actual = I4.vec()
np.testing.assert_array_equal(actual, expected)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,59 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Dynamic SO(n) unit tests.
Author: Frank Dellaert
"""
# pylint: disable=no-name-in-module, import-error
import unittest
import numpy as np
from gtsam import SOn
I4 = SOn(4)
v1 = np.array([0, 0, 0, .1, 0, 0])
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
Q1 = I4.retract(v1)
Q2 = I4.retract(v2)
class TestSO4(unittest.TestCase):
"""Test selected SOn methods."""
def test_constructor(self):
"""Construct from matrix."""
matrix = np.eye(4)
so4 = SOn.FromMatrix(matrix)
self.assertIsInstance(so4, SOn)
def test_retract(self):
"""Test retraction to manifold."""
v = np.zeros((6,), np.float)
actual = I4.retract(v)
self.assertTrue(actual.equals(I4, 1e-9))
def test_local(self):
"""Check localCoordinates for trivial case."""
v0 = np.zeros((6,), np.float)
actual = I4.localCoordinates(I4)
np.testing.assert_array_almost_equal(actual, v0)
def test_compose(self):
"""Check compose works in subgroup."""
expected = I4.retract(2*v1)
actual = Q1.compose(Q1)
self.assertTrue(actual.equals(expected, 1e-3))  # not expmap, so only approximate
def test_vec(self):
"""Check wrapping of vec."""
expected = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1])
actual = I4.vec()
np.testing.assert_array_equal(actual, expected)
if __name__ == "__main__":
unittest.main()

View File

@ -5,7 +5,7 @@ All Rights Reserved
See LICENSE for the license information
SimpleCamera unit tests.
PinholeCameraCal3_S2 unit tests.
Author: Frank Dellaert & Duy Nguyen Ta (Python)
"""
import math
@ -14,7 +14,7 @@ import unittest
import numpy as np
import gtsam
from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, SimpleCamera
from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2
from gtsam.utils.test_case import GtsamTestCase
K = Cal3_S2(625, 625, 0, 0, 0)
@ -23,14 +23,14 @@ class TestSimpleCamera(GtsamTestCase):
def test_constructor(self):
pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5))
camera = SimpleCamera(pose1, K)
camera = PinholeCameraCal3_S2(pose1, K)
self.gtsamAssertEquals(camera.calibration(), K, 1e-9)
self.gtsamAssertEquals(camera.pose(), pose1, 1e-9)
def test_level2(self):
# Create a level camera, looking in Y-direction
pose2 = Pose2(0.4,0.3,math.pi/2.0)
camera = SimpleCamera.Level(K, pose2, 0.1)
camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1)
# expected
x = Point3(1,0,0)

View File

@ -30,8 +30,10 @@ class TestDSFMap(GtsamTestCase):
pair1 = gtsam.IndexPair(1, 18)
self.assertEqual(key(dsf.find(pair1)), key(pair1))
pair2 = gtsam.IndexPair(2, 2)
# testing the merge feature of dsf
dsf.merge(pair1, pair2)
self.assertTrue(dsf.find(pair1), dsf.find(pair1))
self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2)))
if __name__ == '__main__':

View File

@ -0,0 +1,108 @@
"""
Unit tests for optimization that logs to comet.ml.
Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name
import sys
if sys.version_info.major >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import unittest
from datetime import datetime
import gtsam
import numpy as np
from gtsam import Rot3
from gtsam.utils.test_case import GtsamTestCase
from gtsam.utils.logging_optimizer import gtsam_optimize
KEY = 0
MODEL = gtsam.noiseModel_Unit.Create(3)
class TestOptimizeComet(GtsamTestCase):
"""Check correct logging to comet.ml."""
def setUp(self):
"""Set up a small Karcher mean optimization example."""
# Grabbed from KarcherMeanFactor unit tests.
R = Rot3.Expmap(np.array([0.1, 0, 0]))
rotations = {R, R.inverse()} # mean is the identity
self.expected = Rot3()
graph = gtsam.NonlinearFactorGraph()
for R in rotations:
graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
initial = gtsam.Values()
initial.insert(KEY, R)
self.params = gtsam.GaussNewtonParams()
self.optimizer = gtsam.GaussNewtonOptimizer(
graph, initial, self.params)
self.lmparams = gtsam.LevenbergMarquardtParams()
self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
graph, initial, self.lmparams
)
# setup output capture
self.capturedOutput = StringIO()
sys.stdout = self.capturedOutput
def tearDown(self):
"""Reset print capture."""
sys.stdout = sys.__stdout__
def test_simple_printing(self):
"""Test with a simple hook."""
# Provide a hook that just prints
def hook(_, error):
print(error)
# Only thing we require from optimizer is an iterate method
gtsam_optimize(self.optimizer, self.params, hook)
# Check that optimizing yields the identity.
actual = self.optimizer.values()
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
def test_lm_simple_printing(self):
"""Make sure we are properly terminating LM"""
def hook(_, error):
print(error)
gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
actual = self.lmoptimizer.values()
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
@unittest.skip("Not a test we want run every time, as needs comet.ml account")
def test_comet(self):
"""Test with a comet hook."""
from comet_ml import Experiment
comet = Experiment(project_name="Testing",
auto_output_logging="native")
comet.log_dataset_info(name="Karcher", path="shonan")
comet.add_tag("GaussNewton")
comet.log_parameter("method", "GaussNewton")
time = datetime.now()
comet.set_name("GaussNewton-" + str(time.month) + "/" + str(time.day) + " "
+ str(time.hour)+":"+str(time.minute)+":"+str(time.second))
# I want to do some comet thing here
def hook(optimizer, error):
comet.log_metric("Karcher error",
error, optimizer.iterations())
gtsam_optimize(self.optimizer, self.params, hook)
comet.end()
actual = self.optimizer.values()
self.gtsamAssertEquals(actual.atRot3(KEY), self.expected)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,52 @@
"""
Optimization with logging via a hook.
Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name
from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
import gtsam
def optimize(optimizer, check_convergence, hook):
""" Given an optimizer and a convergence check, iterate until convergence.
After each iteration, hook(optimizer, error) is called.
After the function, use values and errors to get the result.
Arguments:
optimizer (T): needs an iterate and an error function.
check_convergence: T * float * float -> bool
hook -- hook function to record the error
"""
# the optimizer is created with default values which incur the error below
current_error = optimizer.error()
hook(optimizer, current_error)
# Iterative loop
while True:
# Do next iteration
optimizer.iterate()
new_error = optimizer.error()
hook(optimizer, new_error)
if check_convergence(optimizer, current_error, new_error):
return
current_error = new_error
def gtsam_optimize(optimizer,
params,
hook):
""" Given an optimizer and params, iterate until convergence.
After each iteration, hook(optimizer) is called.
After the function, use values and errors to get the result.
Arguments:
optimizer {NonlinearOptimizer} -- Nonlinear optimizer
params {NonlinearOptimizerParams} -- Nonlinear optimizer parameters
hook -- hook function to record the error
"""
def check_convergence(optimizer, current_error, new_error):
return (optimizer.iterations() >= params.getMaxIterations()) or (
gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(),
current_error, new_error)) or (
isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound())
optimize(optimizer, check_convergence, hook)
return optimizer.values()
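As a usage sketch of the hook pattern above (the factor, noise-model, and optimizer names are the ones appearing elsewhere in this changeset; this mirrors the unit test, not a prescribed API):

```python
# Minimal sketch: drive an optimizer through gtsam_optimize with a printing hook.
import numpy as np
import gtsam
from gtsam.utils.logging_optimizer import gtsam_optimize

KEY = 0
graph = gtsam.NonlinearFactorGraph()
graph.add(gtsam.PriorFactorRot3(KEY, gtsam.Rot3(), gtsam.noiseModel_Unit.Create(3)))

initial = gtsam.Values()
initial.insert(KEY, gtsam.Rot3.Expmap(np.array([0.1, 0, 0])))

params = gtsam.GaussNewtonParams()
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)

def hook(optimizer, error):
    # Called once with the initial error, then after every iterate()
    print(error)

values = gtsam_optimize(optimizer, params, hook)
print(values.atRot3(KEY))
```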

View File

@ -3,10 +3,109 @@
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from mpl_toolkits.mplot3d import Axes3D
import gtsam
def set_axes_equal(fignum):
"""
Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Args:
fignum (int): An integer representing the figure number for Matplotlib.
"""
fig = plt.figure(fignum)
ax = fig.gca(projection='3d')
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),
])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def ellipsoid(xc, yc, zc, rx, ry, rz, n):
"""
Numpy equivalent of Matlab's ellipsoid function.
Args:
xc (double): Center of ellipsoid in X-axis.
yc (double): Center of ellipsoid in Y-axis.
zc (double): Center of ellipsoid in Z-axis.
rx (double): Radius of ellipsoid in X-axis.
ry (double): Radius of ellipsoid in Y-axis.
rz (double): Radius of ellipsoid in Z-axis.
n (int): The granularity of the ellipsoid plotted.
Returns:
tuple[numpy.ndarray]: The points in the x, y and z axes to use for the surface plot.
"""
u = np.linspace(0, 2*np.pi, n+1)
v = np.linspace(0, np.pi, n+1)
x = -rx * np.outer(np.cos(u), np.sin(v)).T
y = -ry * np.outer(np.sin(u), np.sin(v)).T
z = -rz * np.outer(np.ones_like(u), np.cos(v)).T
return x, y, z
def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5):
"""
Plots a Gaussian as an uncertainty ellipse
Based on Maybeck Vol 1, page 366
k=2.296 corresponds to 1 std, 68.26% of all probability
k=11.82 corresponds to 3 std, 99.74% of all probability
Args:
axes (matplotlib.axes.Axes): Matplotlib axes.
origin (gtsam.Point3): The origin in the world frame.
P (numpy.ndarray): The marginal covariance matrix of the 3D point which will be represented as an ellipse.
scale (float): Scaling factor of the radii of the covariance ellipse.
n (int): Defines the granularity of the ellipse. Higher values indicate finer ellipses.
alpha (float): Transparency value for the plotted surface in the range [0, 1].
"""
k = 11.82
U, S, _ = np.linalg.svd(P)
radii = k * np.sqrt(S)
radii = radii * scale
rx, ry, rz = radii
# generate data for "unrotated" ellipsoid
xc, yc, zc = ellipsoid(0, 0, 0, rx, ry, rz, n)
# rotate data with orientation matrix U and center c
data = np.kron(U[:, 0:1], xc) + np.kron(U[:, 1:2], yc) + \
np.kron(U[:, 2:3], zc)
n = data.shape[1]
x = data[0:n, :] + origin[0]
y = data[n:2*n, :] + origin[1]
z = data[2*n:, :] + origin[2]
axes.plot_surface(x, y, z, alpha=alpha, cmap='hot')
def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
"""Plot a 2D pose on given axis 'axes' with given 'axis_length'."""
"""
Plot a 2D pose on given axis `axes` with given `axis_length`.
Args:
axes (matplotlib.axes.Axes): Matplotlib axes.
pose (gtsam.Pose2): The pose to be plotted.
axis_length (float): The length of the camera axes.
covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
# get rotation and translation (center)
gRp = pose.rotation().matrix() # rotation from pose to global
t = pose.translation()
@ -35,32 +134,66 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None):
np.rad2deg(angle), fill=False)
axes.add_patch(e1)
def plot_pose2(fignum, pose, axis_length=0.1, covariance=None):
"""Plot a 2D pose on given figure with given 'axis_length'."""
"""
Plot a 2D pose on given figure with given `axis_length`.
Args:
fignum (int): Integer representing the figure number to use for plotting.
pose (gtsam.Pose2): The pose to be plotted.
axis_length (float): The length of the camera axes.
covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
# get figure object
fig = plt.figure(fignum)
axes = fig.gca()
plot_pose2_on_axes(axes, pose, axis_length, covariance)
plot_pose2_on_axes(axes, pose, axis_length=axis_length,
covariance=covariance)
def plot_point3_on_axes(axes, point, linespec):
"""Plot a 3D point on given axis 'axes' with given 'linespec'."""
def plot_point3_on_axes(axes, point, linespec, P=None):
"""
Plot a 3D point on given axis `axes` with given `linespec`.
Args:
axes (matplotlib.axes.Axes): Matplotlib axes.
point (gtsam.Point3): The point to be plotted.
linespec (string): String representing formatting options for Matplotlib.
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
axes.plot([point.x()], [point.y()], [point.z()], linespec)
if P is not None:
plot_covariance_ellipse_3d(axes, point.vector(), P)
def plot_point3(fignum, point, linespec):
"""Plot a 3D point on given figure with given 'linespec'."""
def plot_point3(fignum, point, linespec, P=None):
"""
Plot a 3D point on given figure with given `linespec`.
Args:
fignum (int): Integer representing the figure number to use for plotting.
point (gtsam.Point3): The point to be plotted.
linespec (string): String representing formatting options for Matplotlib.
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_point3_on_axes(axes, point, linespec)
plot_point3_on_axes(axes, point, linespec, P)
def plot_3d_points(fignum, values, linespec, marginals=None):
def plot_3d_points(fignum, values, linespec="g*", marginals=None):
"""
Plots the Point3s in 'values', with optional covariances.
Plots the Point3s in `values`, with optional covariances.
Finds all the Point3 objects in the given Values object and plots them.
If a Marginals object is given, this function will also plot marginal
covariance ellipses for each point.
Args:
fignum (int): Integer representing the figure number to use for plotting.
values (gtsam.Values): Values dictionary consisting of points to be plotted.
linespec (string): String representing formatting options for Matplotlib.
marginals (gtsam.Marginals): Marginal covariances used to plot uncertainty ellipses for each point.
"""
keys = values.keys()
@ -68,23 +201,33 @@ def plot_3d_points(fignum, values, linespec, marginals=None):
# Plot points and covariance matrices
for i in range(keys.size()):
try:
p = values.atPoint3(keys.at(i))
# if haveMarginals
# P = marginals.marginalCovariance(key);
# gtsam.plot_point3(p, linespec, P);
# else
plot_point3(fignum, p, linespec)
key = keys.at(i)
point = values.atPoint3(key)
if marginals is not None:
covariance = marginals.marginalCovariance(key)
else:
covariance = None
plot_point3(fignum, point, linespec, covariance)
except RuntimeError:
continue
# I guess it's not a Point3
def plot_pose3_on_axes(axes, pose, axis_length=0.1):
"""Plot a 3D pose on given axis 'axes' with given 'axis_length'."""
def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
"""
Plot a 3D pose on given axis `axes` with given `axis_length`.
Args:
axes (matplotlib.axes.Axes): Matplotlib axes.
pose (gtsam.Pose3): The pose to be plotted.
axis_length (float): The length of the camera axes.
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
# get rotation and translation (center)
gRp = pose.rotation().matrix() # rotation from pose to global
t = pose.translation()
origin = np.array([t.x(), t.y(), t.z()])
origin = pose.translation().vector()
# draw the camera axes
x_axis = origin + gRp[:, 0] * axis_length
@ -100,17 +243,83 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1):
axes.plot(line[:, 0], line[:, 1], line[:, 2], 'b-')
# plot the covariance
# TODO (dellaert): make this work
# if (nargin>2) && (~isempty(P))
# pPp = P(4:6,4:6); % covariance matrix in pose coordinate frame
# gPp = gRp*pPp*gRp'; % convert the covariance matrix to global coordinate frame
# gtsam.covarianceEllipse3D(origin,gPp);
# end
if P is not None:
# covariance matrix in pose coordinate frame
pPp = P[3:6, 3:6]
# convert the covariance matrix to global coordinate frame
gPp = gRp @ pPp @ gRp.T
plot_covariance_ellipse_3d(axes, origin, gPp)
def plot_pose3(fignum, pose, axis_length=0.1):
"""Plot a 3D pose on given figure with given 'axis_length'."""
def plot_pose3(fignum, pose, axis_length=0.1, P=None):
"""
Plot a 3D pose on given figure with given `axis_length`.
Args:
fignum (int): Integer representing the figure number to use for plotting.
pose (gtsam.Pose3): 3D pose to be plotted.
axis_length (float): The length of the camera axes.
P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation.
"""
# get figure object
fig = plt.figure(fignum)
axes = fig.gca(projection='3d')
plot_pose3_on_axes(axes, pose, axis_length)
plot_pose3_on_axes(axes, pose, P=P,
axis_length=axis_length)
def plot_trajectory(fignum, values, scale=1, marginals=None):
"""
Plot a complete 3D trajectory using poses in `values`.
Args:
fignum (int): Integer representing the figure number to use for plotting.
values (gtsam.Values): Values dict containing the poses.
scale (float): Value to scale the poses by.
marginals (gtsam.Marginals): Marginalized probability values of the estimation.
Used to plot uncertainty bounds.
"""
pose3Values = gtsam.utilities_allPose3s(values)
keys = gtsam.KeyVector(pose3Values.keys())
lastIndex = None
for i in range(keys.size()):
key = keys.at(i)
try:
pose = pose3Values.atPose3(key)
except:
print("Warning: no Pose3 at key: {0}".format(key))
if lastIndex is not None:
lastKey = keys.at(lastIndex)
try:
lastPose = pose3Values.atPose3(lastKey)
except:
print("Warning: no Pose3 at key: {0}".format(lastKey))
pass
if marginals:
covariance = marginals.marginalCovariance(lastKey)
else:
covariance = None
plot_pose3(fignum, lastPose, P=covariance,
axis_length=scale)
lastIndex = i
# Draw final pose
if lastIndex is not None:
lastKey = keys.at(lastIndex)
try:
lastPose = pose3Values.atPose3(lastKey)
if marginals:
covariance = marginals.marginalCovariance(lastKey)
else:
covariance = None
plot_pose3(fignum, lastPose, P=covariance,
axis_length=scale)
except:
pass

View File

@ -1,8 +1,9 @@
from __future__ import print_function
import numpy as np
from math import pi, cos, sin
import gtsam
from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3
class Options:
@ -10,7 +11,7 @@ class Options:
Options to generate test scenario
"""
def __init__(self, triangle=False, nrCameras=3, K=gtsam.Cal3_S2()):
def __init__(self, triangle=False, nrCameras=3, K=Cal3_S2()):
"""
Options to generate test scenario
@param triangle: generate a triangle scene with 3 points if True, otherwise
@ -27,10 +28,10 @@ class GroundTruth:
Object holding generated ground-truth data
"""
def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4):
def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4):
self.K = K
self.cameras = [gtsam.Pose3()] * nrCameras
self.points = [gtsam.Point3()] * nrPoints
self.cameras = [Pose3()] * nrCameras
self.points = [Point3()] * nrPoints
def print_(self, s=""):
print(s)
@ -52,11 +53,11 @@ class Data:
class NoiseModels:
pass
def __init__(self, K=gtsam.Cal3_S2(), nrCameras=3, nrPoints=4):
def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4):
self.K = K
self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras]
self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras]
self.J = [x[:] for x in [[0] * nrPoints] * nrCameras]
self.odometry = [gtsam.Pose3()] * nrCameras
self.odometry = [Pose3()] * nrCameras
# Set Noise parameters
self.noiseModels = Data.NoiseModels()
@ -73,7 +74,7 @@ class Data:
def generate_data(options):
""" Generate ground-truth and measurement data. """
K = gtsam.Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.)
K = Cal3_S2(500, 500, 0, 640. / 2., 480. / 2.)
nrPoints = 3 if options.triangle else 8
truth = GroundTruth(K=K, nrCameras=options.nrCameras, nrPoints=nrPoints)
@ -83,26 +84,26 @@ def generate_data(options):
if options.triangle: # Create a triangle target, just 3 points on a plane
r = 10
for j in range(len(truth.points)):
theta = j * 2 * pi / nrPoints
truth.points[j] = gtsam.Point3(r * cos(theta), r * sin(theta), 0)
theta = j * 2 * np.pi / nrPoints
truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0)
else: # 3D landmarks as vertices of a cube
truth.points = [
gtsam.Point3(10, 10, 10), gtsam.Point3(-10, 10, 10),
gtsam.Point3(-10, -10, 10), gtsam.Point3(10, -10, 10),
gtsam.Point3(10, 10, -10), gtsam.Point3(-10, 10, -10),
gtsam.Point3(-10, -10, -10), gtsam.Point3(10, -10, -10)
Point3(10, 10, 10), Point3(-10, 10, 10),
Point3(-10, -10, 10), Point3(10, -10, 10),
Point3(10, 10, -10), Point3(-10, 10, -10),
Point3(-10, -10, -10), Point3(10, -10, -10)
]
# Create camera cameras on a circle around the triangle
height = 10
r = 40
for i in range(options.nrCameras):
theta = i * 2 * pi / options.nrCameras
t = gtsam.Point3(r * cos(theta), r * sin(theta), height)
truth.cameras[i] = gtsam.SimpleCamera.Lookat(t,
gtsam.Point3(),
gtsam.Point3(0, 0, 1),
truth.K)
theta = i * 2 * np.pi / options.nrCameras
t = Point3(r * np.cos(theta), r * np.sin(theta), height)
truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t,
Point3(),
Point3(0, 0, 1),
truth.K)
# Create measurements
for j in range(nrPoints):
# All landmarks seen in every frame

View File

@ -4,11 +4,11 @@ include(GtsamCythonWrap)
# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
# and eigency's cython pxd headers can be found when cythonizing gtsam
file(COPY "." DESTINATION ".")
set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
set(OUTPUT_DIR "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency")
set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
# This is to make the build/cython/gtsam_eigency folder a python package
configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
# include eigency headers
include_directories(${EIGENCY_INCLUDE_DIR})
@ -16,8 +16,8 @@ include_directories(${EIGENCY_INCLUDE_DIR})
# Cythonize and build eigency
message(STATUS "Cythonize and build eigency")
# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
# in conversions_api.h correctly!!!
# a part of the gtsam_eigency package and generate the function call import_gtsam_eigency__conversions()
# in conversions_api.h correctly!
cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
"${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
@ -37,13 +37,6 @@ add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
add_custom_target(cythonize_eigency)
add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
# install
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}"
PATTERN "CMakeLists.txt" EXCLUDE
PATTERN "__init__.py.in" EXCLUDE)
install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency")
install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
if(TARGET ${python_install_target})
add_dependencies(${python_install_target} cythonize_eigency)
endif()

View File

@ -0,0 +1,128 @@
"""
GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Track a moving object using "Time of Arrival" measurements at 4 microphones.
Author: Frank Dellaert
"""
# pylint: disable=invalid-name, no-name-in-module
from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams,
NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic)
from gtsam_unstable import Event, TimeOfArrival, TOAFactor
# units
MS = 1e-3
CM = 1e-2
# Instantiate functor with speed of sound value
TIME_OF_ARRIVAL = TimeOfArrival(330)
def define_microphones():
"""Create microphones."""
height = 0.5
microphones = []
microphones.append(Point3(0, 0, height))
microphones.append(Point3(403 * CM, 0, height))
microphones.append(Point3(403 * CM, 403 * CM, height))
microphones.append(Point3(0, 403 * CM, 2 * height))
return microphones
def create_trajectory(n):
"""Create ground truth trajectory."""
trajectory = []
timeOfEvent = 10
# simulate emitting a sound every second while moving on straight line
for key in range(n):
trajectory.append(
Event(timeOfEvent, 245 * CM + key * 1.0, 201.5 * CM, (212 - 45) * CM))
timeOfEvent += 1
return trajectory
def simulate_one_toa(microphones, event):
"""Simulate time-of-arrival measurements for a single event."""
return [TIME_OF_ARRIVAL.measure(event, microphones[i])
for i in range(len(microphones))]
def simulate_toa(microphones, trajectory):
"""Simulate time-of-arrival measurements for an entire trajectory."""
return [simulate_one_toa(microphones, event)
for event in trajectory]
def create_graph(microphones, simulatedTOA):
"""Create factor graph."""
graph = NonlinearFactorGraph()
# Create a noise model for the TOA error
model = noiseModel_Isotropic.Sigma(1, 0.5 * MS)
K = len(microphones)
key = 0
for toa in simulatedTOA:
for i in range(K):
factor = TOAFactor(key, microphones[i], toa[i], model)
graph.push_back(factor)
key += 1
return graph
def create_initial_estimate(n):
"""Create initial estimate for n events."""
initial = Values()
zero = Event()
for key in range(n):
TOAFactor.InsertEvent(key, zero, initial)
return initial
def toa_example():
"""Run example with 4 microphones and 5 events in a straight line."""
# Create microphones
microphones = define_microphones()
K = len(microphones)
for i in range(K):
print("mic {} = {}".format(i, microphones[i]))
# Create a ground truth trajectory
n = 5
groundTruth = create_trajectory(n)
for event in groundTruth:
print(event)
# Simulate time-of-arrival measurements
simulatedTOA = simulate_toa(microphones, groundTruth)
for key in range(n):
for i in range(K):
print("z_{}{} = {} ms".format(key, i, simulatedTOA[key][i] / MS))
# create factor graph
graph = create_graph(microphones, simulatedTOA)
print(graph.at(0))
# Create initial estimate
initial_estimate = create_initial_estimate(n)
print(initial_estimate)
# Optimize using Levenberg-Marquardt optimization.
params = LevenbergMarquardtParams()
params.setAbsoluteErrorTol(1e-10)
params.setVerbosityLM("SUMMARY")
optimizer = LevenbergMarquardtOptimizer(graph, initial_estimate, params)
result = optimizer.optimize()
print("Final Result:\n", result)
if __name__ == '__main__':
toa_example()
print("Example complete")

View File

@ -1,3 +1,3 @@
Cython>=0.25.2
backports_abc>=0.5
numpy>=1.12.0
numpy>=1.11.0

View File

@ -5,17 +5,24 @@ try:
except ImportError:
from distutils.core import setup, find_packages
if 'SETUP_PY_NO_CHECK' not in os.environ:
script_path = os.path.abspath(os.path.realpath(__file__))
install_path = os.path.abspath(os.path.realpath(os.path.join('${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}', 'setup.py')))
if script_path != install_path:
print('setup.py is being run from an unexpected location: "{}"'.format(script_path))
print('please run `make install` and run the script from there')
sys.exit(1)
packages = find_packages()
package_data = {
package:
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')]
for package in packages
}
cython_install_requirements = open("${CYTHON_INSTALL_REQUIREMENTS_FILE}").readlines()
install_requires = [line.strip() \
for line in cython_install_requirements \
if len(line.strip()) > 0 and not line.strip().startswith('#')
]
# Cleaner to read in the contents rather than copy them over.
readme_contents = open("${PROJECT_SOURCE_DIR}/README.md").read()
setup(
name='gtsam',
description='Georgia Tech Smoothing And Mapping library',
@ -25,7 +32,9 @@ setup(
author_email='frank.dellaert@gtsam.org',
license='Simplified BSD license',
keywords='slam sam robotics localization mapping optimization',
long_description='''${README_CONTENTS}''',
long_description=readme_contents,
long_description_content_type='text/markdown',
python_requires='>=2.7',
# https://pypi.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
@ -41,11 +50,6 @@ setup(
],
packages=packages,
package_data={package:
[f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.dll')]
for package in packages
},
install_requires=[line.strip() for line in '''
${CYTHON_INSTALL_REQUIREMENTS}
'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')]
package_data=package_data,
install_requires=install_requires
)

12
debian/README.md vendored
View File

@ -1,12 +0,0 @@
# How to build a GTSAM debian package
To use the ``debuild`` command, install the ``devscripts`` package
sudo apt install devscripts
Change into the gtsam directory, then run:
debuild -us -uc -j4
Adjust the ``-j4`` depending on how many CPUs you want to build on in
parallel.

5
debian/changelog vendored
View File

@ -1,5 +0,0 @@
gtsam (4.0.0-1berndpfrommer) bionic; urgency=medium
* initial release
-- Bernd Pfrommer <bernd.pfrommer@gmail.com> Wed, 18 Jul 2018 20:36:44 -0400

1
debian/compat vendored
View File

@ -1 +0,0 @@
9

15
debian/control vendored
View File

@ -1,15 +0,0 @@
Source: gtsam
Section: libs
Priority: optional
Maintainer: Frank Dellaert <frank@cc.gatech.edu>
Uploaders: Jose Luis Blanco Claraco <joseluisblancoc@gmail.com>, Bernd Pfrommer <bernd.pfrommer@gmail.com>
Build-Depends: cmake, libboost-all-dev (>= 1.58), libeigen3-dev, libtbb-dev, debhelper (>=9)
Standards-Version: 3.9.7
Homepage: https://github.com/borglab/gtsam
Vcs-Browser: https://github.com/borglab/gtsam
Package: libgtsam-dev
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Description: Georgia Tech Smoothing and Mapping Library
gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications

15
debian/copyright vendored
View File

@ -1,15 +0,0 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: gtsam
Source: https://bitbucket.org/gtborg/gtsam.git
Files: *
Copyright: 2017, Frank Dellaert
License: BSD
Files: gtsam/3rdparty/CCOLAMD/*
Copyright: 2005-2011, Univ. of Florida. Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert. http://www.cise.ufl.edu/research/sparse
License: GNU LESSER GENERAL PUBLIC LICENSE
Files: gtsam/3rdparty/Eigen/*
Copyright: 2017, Multiple Authors
License: MPL2

View File

25
debian/rules vendored
View File

@ -1,25 +0,0 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
export DH_VERBOSE = 1
# see FEATURE AREAS in dpkg-buildflags(1)
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
# package maintainers to append LDFLAGS
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
%:
dh $@ --parallel
# dh_make generated override targets
# This is an example for CMake (see https://bugs.debian.org/641051 )
override_dh_auto_configure:
dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF

View File

@ -1 +0,0 @@
3.0 (quilt)

View File

@ -5,7 +5,7 @@ NonlinearFactorGraph graph;
Pose2 priorMean(0.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr priorNoise =
noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
graph.addPrior(1, priorMean, priorNoise);
// Add two odometry factors
Pose2 odometry(2.0, 0.0, 0.0);

View File

@ -1,7 +1,7 @@
NonlinearFactorGraph graph;
noiseModel::Diagonal::shared_ptr priorNoise =
noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.add(PriorFactor<Pose2>(1, Pose2(0, 0, 0), priorNoise));
graph.addPrior(1, Pose2(0, 0, 0), priorNoise);
// Add odometry factors
noiseModel::Diagonal::shared_ptr model =

View File

@ -2291,15 +2291,11 @@ uncalibration
used in the residual).
\end_layout
\begin_layout Standard
\begin_inset Note Note
status collapsed
\begin_layout Section
Noise models of prior factors
\end_layout
\begin_layout Plain Layout
\begin_layout Standard
The simplest way to describe noise models is by an example.
Let's take a prior factor on a 3D pose
\begin_inset Formula $x\in\SE 3$
@ -2353,7 +2349,7 @@ e\left(x\right)=\norm{h\left(x\right)}_{\Sigma}^{2}=h\left(x\right)^{\t}\Sigma^{
useful answer out quickly ]
\end_layout
\begin_layout Plain Layout
\begin_layout Standard
The density induced by a noise model on the prior factor is Gaussian in
the tangent space about the linearization point.
Suppose that the pose is linearized at
@ -2431,7 +2427,7 @@ Here we see that the update
.
\end_layout
\begin_layout Plain Layout
\begin_layout Standard
This means that to draw random pose samples, we actually draw random samples
of
\begin_inset Formula $\delta x$
@ -2456,7 +2452,7 @@ This means that to draw random pose samples, we actually draw random samples
Noise models of between factors
\end_layout
\begin_layout Plain Layout
\begin_layout Standard
The noise model of a BetweenFactor is a bit more complicated.
The unwhitened error is
\begin_inset Formula
@ -2516,11 +2512,6 @@ e\left(\delta x_{1}\right) & \approx\norm{\log\left(z^{-1}\left(x_{1}\exp\delta
\end_inset
\end_layout
\end_inset
\end_layout
\end_body
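As a compact summary of the noise-model discussion edited in the hunks above (a sketch in the document's notation, assuming the right-increment exponential-map convention used elsewhere in this document): a prior factor with linearization point $\bar{x}$ and covariance $\Sigma$ induces a Gaussian on the tangent-space increment, so drawing a random pose sample means drawing the increment and retracting it, while the factor error is the Mahalanobis norm quoted above:

```latex
\[
  \delta x \sim \mathcal{N}(0,\Sigma), \qquad
  x = \bar{x}\,\exp\bigl(\widehat{\delta x}\bigr), \qquad
  e(x) = \|h(x)\|_{\Sigma}^{2} = h(x)^{\mathsf{T}}\Sigma^{-1}h(x).
\]
```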

Binary file not shown.

View File

@ -1,7 +1,9 @@
#LyX 2.1 created this file. For more info see http://www.lyx.org/
\lyxformat 474
#LyX 2.3 created this file. For more info see http://www.lyx.org/
\lyxformat 544
\begin_document
\begin_header
\save_transient_properties true
\origin unavailable
\textclass article
\use_default_options false
\begin_modules
@ -14,16 +16,18 @@ theorems-ams-bytype
\language_package default
\inputencoding auto
\fontencoding global
\font_roman times
\font_sans default
\font_typewriter default
\font_math auto
\font_roman "times" "default"
\font_sans "default" "default"
\font_typewriter "default" "default"
\font_math "auto" "auto"
\font_default_family rmdefault
\use_non_tex_fonts false
\font_sc false
\font_osf false
\font_sf_scale 100
\font_tt_scale 100
\font_sf_scale 100 100
\font_tt_scale 100 100
\use_microtype false
\use_dash_ligatures true
\graphics default
\default_output_format default
\output_sync 0
@ -53,6 +57,7 @@ theorems-ams-bytype
\suppress_date false
\justification true
\use_refstyle 0
\use_minted 0
\index Index
\shortcut idx
\color #008000
@ -65,7 +70,10 @@ theorems-ams-bytype
\tocdepth 3
\paragraph_separation indent
\paragraph_indentation default
\quotes_language english
\is_math_indent 0
\math_numbering_side default
\quotes_style english
\dynamic_quotes 0
\papercolumns 1
\papersides 1
\paperpagestyle default
@ -98,6 +106,11 @@ width "100col%"
special "none"
height "1in"
height_special "totalheight"
thickness "0.4pt"
separation "3pt"
shadowsize "4pt"
framecolor "black"
backgroundcolor "none"
status collapsed
\begin_layout Plain Layout
@ -654,6 +667,7 @@ reference "eq:LocalBehavior"
\begin_inset CommandInset citation
LatexCommand cite
key "Spivak65book"
literal "true"
\end_inset
@ -934,6 +948,7 @@ See
\begin_inset CommandInset citation
LatexCommand cite
key "Spivak65book"
literal "true"
\end_inset
@ -1025,6 +1040,7 @@ See
\begin_inset CommandInset citation
LatexCommand cite
key "Spivak65book"
literal "true"
\end_inset
@ -2209,6 +2225,7 @@ instantaneous velocity
LatexCommand cite
after "page 51 for rotations, page 419 for SE(3)"
key "Murray94book"
literal "true"
\end_inset
@ -2965,7 +2982,7 @@ B^{T} & I_{3}\end{array}\right]
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "sub:Pushforward-of-Between"
name "subsec:Pushforward-of-Between"
\end_inset
@ -3419,6 +3436,7 @@ A retraction
\begin_inset CommandInset citation
LatexCommand cite
key "Absil07book"
literal "true"
\end_inset
@ -3873,7 +3891,7 @@ BetweenFactor
, derived in Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sub:Pushforward-of-Between"
reference "subsec:Pushforward-of-Between"
\end_inset
@ -4430,7 +4448,7 @@ In the case of
\begin_inset Formula $\SOthree$
\end_inset
the vector space is
the vector space is
\begin_inset Formula $\Rthree$
\end_inset
@ -4502,7 +4520,7 @@ reference "Th:InverseAction"
\begin_layout Subsection
\begin_inset CommandInset label
LatexCommand label
name "sub:3DAngularVelocities"
name "subsec:3DAngularVelocities"
\end_inset
@ -4695,6 +4713,7 @@ Absil
LatexCommand cite
after "page 58"
key "Absil07book"
literal "true"
\end_inset
@ -5395,6 +5414,7 @@ While not a Lie group, we can define an exponential map, which is given
\begin_inset CommandInset citation
LatexCommand cite
key "Ma01ijcv"
literal "true"
\end_inset
@ -5402,6 +5422,7 @@ key "Ma01ijcv"
\begin_inset CommandInset href
LatexCommand href
name "http://stat.fsu.edu/~anuj/CVPR_Tutorial/Part2.pdf"
literal "false"
\end_inset
@ -5605,6 +5626,7 @@ The exponential map uses
\begin_inset CommandInset citation
LatexCommand cite
key "Absil07book"
literal "true"
\end_inset
@ -6293,7 +6315,7 @@ d^{c}=R_{w}^{c}\left(d^{w}+(t^{w}v^{w})v^{w}-t^{w}\right)
\end_layout
\begin_layout Section
Line3 (Ocaml)
Line3
\end_layout
\begin_layout Standard
@ -6345,6 +6367,14 @@ R'=R(I+\Omega)
\end_inset
\end_layout
\begin_layout Subsection
Projecting Line3
\end_layout
\begin_layout Standard
Projecting a line to 2D can be done easily, as both
\begin_inset Formula $v$
\end_inset
@ -6430,13 +6460,21 @@ or the
\end_layout
\begin_layout Subsection
Action of
\begin_inset Formula $\SEthree$
\end_inset
on the line
\end_layout
\begin_layout Standard
Transforming a 3D line
\begin_inset Formula $(R,(a,b))$
\end_inset
from a world coordinate frame to a camera frame
\begin_inset Formula $(R_{w}^{c},t^{w})$
\begin_inset Formula $T_{c}^{w}=(R_{c}^{w},t^{w})$
\end_inset
is done by
@ -6466,17 +6504,115 @@ b'=b-R_{2}^{T}t^{w}
\end_inset
Again, we need to redo the derivatives, as R is incremented from the right.
The first argument is incremented from the left, but the result is incremented
on the right:
where
\begin_inset Formula $R_{1}$
\end_inset
and
\begin_inset Formula $R_{2}$
\end_inset
are the columns of
\begin_inset Formula $R$
\end_inset
, as before.
\end_layout
\begin_layout Standard
To find the derivatives, the transformation of a line
\begin_inset Formula $l^{w}=(R,a,b)$
\end_inset
from world coordinates to a camera coordinate frame
\begin_inset Formula $T_{c}^{w}$
\end_inset
, specified in world coordinates, can be written as a function
\begin_inset Formula $f:\SEthree\times L\rightarrow L$
\end_inset
, as given above, i.e.,
\begin_inset Formula
\begin{eqnarray*}
R'(I+\Omega')=(AB)(I+\Omega') & = & (I+\Skew{S\omega})AB\\
I+\Omega' & = & (AB)^{T}(I+\Skew{S\omega})(AB)\\
\Omega' & = & R'^{T}\Skew{S\omega}R'\\
\Omega' & = & \Skew{R'^{T}S\omega}\\
\omega' & = & R'^{T}S\omega
\end{eqnarray*}
\[
f(T_{c}^{w},l^{w})=\left(\left(R_{c}^{w}\right)^{T}R,a-R_{1}^{T}t^{w},b-R_{2}^{T}t^{w}\right).
\]
\end_inset
Let us find the Jacobian
\begin_inset Formula $J_{1}$
\end_inset
of
\begin_inset Formula $f$
\end_inset
with respect to the first argument
\begin_inset Formula $T_{c}^{w}$
\end_inset
, which should obey
\begin_inset Formula
\begin{align*}
f(T_{c}^{w}e^{\xihat},l^{w}) & \approx f(T_{c}^{w},l^{w})+J_{1}\xi
\end{align*}
\end_inset
Note that
\begin_inset Formula
\[
T_{c}^{w}e^{\xihat}\approx\left[\begin{array}{cc}
R_{c}^{w}\left(I_{3}+\Skew{\omega}\right) & t^{w}+R_{c}^{w}v\\
0 & 1
\end{array}\right]
\]
\end_inset
Let's write this out separately for each of
\begin_inset Formula $R,a,b$
\end_inset
:
\begin_inset Formula
\begin{align*}
\left(R_{c}^{w}\left(I_{3}+\Skew{\omega}\right)\right)^{T}R & \approx\left(R_{c}^{w}\right)^{T}R(I+\left[J_{R\omega}\omega\right]_{\times})\\
a-R_{1}^{T}\left(t^{w}+R_{c}^{w}v\right) & \approx a-R_{1}^{T}t^{w}+J_{av}v\\
b-R_{2}^{T}\left(t^{w}+R_{c}^{w}v\right) & \approx b-R_{2}^{T}t^{w}+J_{bv}v
\end{align*}
\end_inset
Simplifying, we get:
\begin_inset Formula
\begin{align*}
-\Skew{\omega}R' & \approx R'\left[J_{R\omega}\omega\right]_{\times}\\
-R_{1}^{T}R_{c}^{w} & \approx J_{av}\\
-R_{2}^{T}R_{c}^{w} & \approx J_{bv}
\end{align*}
\end_inset
which gives the expressions for
\begin_inset Formula $J_{av}$
\end_inset
and
\begin_inset Formula $J_{bv}$
\end_inset
.
The top line can be further simplified:
\begin_inset Formula
\begin{align*}
-\Skew{\omega}R' & \approx R'\left[J_{R\omega}\omega\right]_{\times}\\
-R'^{T}\Skew{\omega}R' & \approx\left[J_{R\omega}\omega\right]_{\times}\\
-\Skew{R'^{T}\omega} & \approx\left[J_{R\omega}\omega\right]_{\times}\\
-R'^{T} & \approx J_{R\omega}
\end{align*}
\end_inset
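Collecting the results of this derivation in one place (a summary of the expressions obtained in the hunk above, in its notation, with $R' = (R_c^w)^{\mathsf{T}}R$): the Jacobian $J_1$ of $f$ with respect to the pose $T_c^w$ has rotation and translation blocks

```latex
\[
  J_{R\omega} = -R'^{\mathsf{T}}, \qquad
  J_{av} = -R_{1}^{\mathsf{T}}R_{c}^{w}, \qquad
  J_{bv} = -R_{2}^{\mathsf{T}}R_{c}^{w}.
\]
```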
@ -6687,6 +6823,7 @@ Spivak
\begin_inset CommandInset citation
LatexCommand cite
key "Spivak65book"
literal "true"
\end_inset
@ -6795,6 +6932,7 @@ The following is adapted from Appendix A in
\begin_inset CommandInset citation
LatexCommand cite
key "Murray94book"
literal "true"
\end_inset
@ -6924,6 +7062,7 @@ might be
LatexCommand cite
after "page 45"
key "Hall00book"
literal "true"
\end_inset

Binary file not shown.

BIN
doc/robust.pdf Normal file

Binary file not shown.

21
docker/README.md Normal file
View File

@ -0,0 +1,21 @@
# Instructions
Build all docker images, in order:
```bash
(cd ubuntu-boost-tbb && ./build.sh)
(cd ubuntu-gtsam && ./build.sh)
(cd ubuntu-gtsam-python && ./build.sh)
(cd ubuntu-gtsam-python-vnc && ./build.sh)
```
Then launch with:
```bash
docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
```
Then open a remote VNC X client, for example:
```bash
sudo apt-get install tigervnc-viewer
xtigervncviewer :5900
```

View File

@ -1,18 +0,0 @@
# Get the base Ubuntu image from Docker Hub
FROM ubuntu:bionic
# Update apps on the base image
RUN apt-get -y update && apt-get install -y
# Install C++
RUN apt-get -y install build-essential
# Install boost and cmake
RUN apt-get -y install libboost-all-dev cmake
# Install TBB
RUN apt-get -y install libtbb-dev
# Install latest Eigen
RUN apt-get install -y libeigen3-dev

View File

@ -0,0 +1,19 @@
# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages.
# Get the base Ubuntu image from Docker Hub
FROM ubuntu:bionic
# Disable GUI prompts
ENV DEBIAN_FRONTEND noninteractive
# Update apps on the base image
RUN apt-get -y update && apt-get -y install
# Install C++
RUN apt-get -y install build-essential apt-utils
# Install boost and cmake
RUN apt-get -y install libboost-all-dev cmake
# Install TBB
RUN apt-get -y install libtbb-dev

View File

@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .

View File

@ -0,0 +1,20 @@
# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
# Get the base Ubuntu/GTSAM image from Docker Hub
FROM dellaert/ubuntu-gtsam-python:bionic
# Things needed to get a python GUI
ENV DEBIAN_FRONTEND noninteractive
RUN apt install -y python-tk
RUN python3 -m pip install matplotlib
# Install a VNC X-server, Frame buffer, and windows manager
RUN apt install -y x11vnc xvfb fluxbox
# Finally, install wmctrl needed for bootstrap script
RUN apt install -y wmctrl
# Copy bootstrap script and make sure it runs
COPY bootstrap.sh /
CMD '/bootstrap.sh'

View File

@ -0,0 +1,111 @@
#!/bin/bash
# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb
main() {
log_i "Starting xvfb virtual display..."
launch_xvfb
log_i "Starting window manager..."
launch_window_manager
log_i "Starting VNC server..."
run_vnc_server
}
launch_xvfb() {
local xvfbLockFilePath="/tmp/.X1-lock"
if [ -f "${xvfbLockFilePath}" ]
then
log_i "Removing xvfb lock file '${xvfbLockFilePath}'..."
if ! rm -v "${xvfbLockFilePath}"
then
log_e "Failed to remove xvfb lock file"
exit 1
fi
fi
# Set defaults if the user did not specify envs.
export DISPLAY=${XVFB_DISPLAY:-:1}
local screen=${XVFB_SCREEN:-0}
local resolution=${XVFB_RESOLUTION:-1280x960x24}
local timeout=${XVFB_TIMEOUT:-5}
# Start and wait for either Xvfb to be fully up or we hit the timeout.
Xvfb ${DISPLAY} -screen ${screen} ${resolution} &
local loopCount=0
until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1
do
loopCount=$((loopCount+1))
sleep 1
if [ ${loopCount} -gt ${timeout} ]
then
log_e "xvfb failed to start"
exit 1
fi
done
}
launch_window_manager() {
local timeout=${XVFB_TIMEOUT:-5}
# Start and wait for either fluxbox to be fully up or we hit the timeout.
fluxbox &
local loopCount=0
until wmctrl -m > /dev/null 2>&1
do
loopCount=$((loopCount+1))
sleep 1
if [ ${loopCount} -gt ${timeout} ]
then
log_e "fluxbox failed to start"
exit 1
fi
done
}
run_vnc_server() {
local passwordArgument='-nopw'
if [ -n "${VNC_SERVER_PASSWORD}" ]
then
local passwordFilePath="${HOME}/.x11vnc.pass"
if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}"
then
log_e "Failed to store x11vnc password"
exit 1
fi
passwordArgument="-rfbauth ${passwordFilePath}"
log_i "The VNC server will ask for a password"
else
log_w "The VNC server will NOT ask for a password"
fi
x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} &
wait $!
}
log_i() {
log "[INFO] ${@}"
}
log_w() {
log "[WARN] ${@}"
}
log_e() {
log "[ERROR] ${@}"
}
log() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}"
}
control_c() {
echo ""
exit
}
trap control_c SIGINT SIGTERM SIGHUP
main
exit

View File

@ -0,0 +1,4 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .

View File

@ -0,0 +1,5 @@
# After running this script, connect VNC client to 0.0.0.0:5900
docker run -it \
--workdir="/usr/src/gtsam" \
-p 5900:5900 \
dellaert/ubuntu-gtsam-python-vnc:bionic

View File

@ -0,0 +1,31 @@
# GTSAM Ubuntu image with Python wrapper support.
# Get the base Ubuntu/GTSAM image from Docker Hub
FROM dellaert/ubuntu-gtsam:bionic
# Install pip
RUN apt-get install -y python3-pip python3-dev
# Install python wrapper requirements
RUN python3 -m pip install -U -r /usr/src/gtsam/cython/requirements.txt
# Run cmake again, now with cython toolbox on
WORKDIR /usr/src/gtsam/build
RUN cmake \
-DCMAKE_BUILD_TYPE=Release \
-DGTSAM_WITH_EIGEN_MKL=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
-DGTSAM_PYTHON_VERSION=3\
..
# Build again, since the ubuntu-gtsam image cleaned its build tree
RUN make -j4 install && make clean
# Needed to run python wrapper:
RUN echo 'export PYTHONPATH=/usr/local/cython/:$PYTHONPATH' >> /root/.bashrc
# Run bash
CMD ["bash"]

View File

@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .

View File

@ -0,0 +1,36 @@
# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
# Get the base Ubuntu image from Docker Hub
FROM dellaert/ubuntu-boost-tbb:bionic
# Install git
RUN apt-get update && \
apt-get install -y git
# Install compiler
RUN apt-get install -y build-essential
# Clone GTSAM (develop branch)
WORKDIR /usr/src/
RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git
# Change to build directory. Will be created automatically.
WORKDIR /usr/src/gtsam/build
# Run cmake
RUN cmake \
-DCMAKE_BUILD_TYPE=Release \
-DGTSAM_WITH_EIGEN_MKL=OFF \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_BUILD_TIMING_ALWAYS=OFF \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_INSTALL_CYTHON_TOOLBOX=OFF \
..
# Build
RUN make -j4 install && make clean
# Needed to link with GTSAM
RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> /root/.bashrc
# Run bash
CMD ["bash"]

3
docker/ubuntu-gtsam/build.sh Executable file
View File

@ -0,0 +1,3 @@
# Build command for Docker image
# TODO(dellaert): use docker compose and/or cmake
docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .

View File

@ -1,7 +1,4 @@
set (excluded_examples
DiscreteBayesNet_FG.cpp
UGM_chain.cpp
UGM_small.cpp
elaboratePoint2KalmanFilter.cpp
)

View File

@ -18,7 +18,8 @@
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <boost/make_shared.hpp>
using namespace gtsam;
@ -47,7 +48,7 @@ public:
/// evaluate the error
virtual Vector evaluateError(const Pose3& pose, boost::optional<Matrix&> H =
boost::none) const {
SimpleCamera camera(pose, *K_);
PinholeCamera<Cal3_S2> camera(pose, *K_);
return camera.project(P_, H, boost::none, boost::none) - p_;
}
};

View File

@ -31,19 +31,19 @@ void createExampleBALFile(const string& filename, const vector<Point3>& P,
Cal3Bundler()) {
// Class that will gather all data
SfM_data data;
SfmData data;
// Create two cameras
Rot3 aRb = Rot3::Yaw(M_PI_2);
Point3 aTb(0.1, 0, 0);
Pose3 identity, aPb(aRb, aTb);
data.cameras.push_back(SfM_Camera(pose1, K));
data.cameras.push_back(SfM_Camera(pose2, K));
data.cameras.push_back(SfmCamera(pose1, K));
data.cameras.push_back(SfmCamera(pose2, K));
for(const Point3& p: P) {
// Create the track
SfM_Track track;
SfmTrack track;
track.p = p;
track.r = 1;
track.g = 1;

6
examples/Data/Klaus3.g2o Normal file
View File

@ -0,0 +1,6 @@
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0

View File

@ -0,0 +1,9 @@
VERTEX_SE3:QUAT 0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000
VERTEX_SE3:QUAT 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230
VERTEX_SE3:QUAT 2 1.993500 0.023275 0.003793 -0.351729 -0.597838 0.584174 0.421446
VERTEX_SE3:QUAT 3 2.004291 1.024305 0.018047 0.331798 -0.200659 0.919323 0.067024
VERTEX_SE3:QUAT 4 0.999908 1.055073 0.020212 -0.035697 -0.462490 0.445933 0.765488
EDGE_SE3:QUAT 0 1 1.001367 0.015390 0.004948 0.190253 0.283162 -0.392318 0.854230 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 1 2 0.523923 0.776654 0.326659 0.311512 0.656877 -0.678505 0.105373 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 2 3 0.910927 0.055169 -0.411761 0.595795 -0.561677 0.079353 0.568551 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000
EDGE_SE3:QUAT 3 4 0.775288 0.228798 -0.596923 -0.592077 0.303380 -0.513226 0.542221 10000.000000 0.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 0.000000 10000.000000 0.000000 0.000000 10000.000000 0.000000 10000.000000

11
examples/Data/toyExample.g2o Executable file
View File

@ -0,0 +1,11 @@
VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1
VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963
VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100

View File

@ -0,0 +1,83 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file DiscreteBayesNetExample.cpp
* @brief Discrete Bayes Net example with famous Asia Bayes Network
* @author Frank Dellaert
* @date July 10, 2020
*/
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/inference/BayesNet.h>
#include <iomanip>
using namespace std;
using namespace gtsam;
int main(int argc, char **argv) {
DiscreteBayesNet asia;
DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
asia.add(Asia % "99/1");
asia.add(Smoking % "50/50");
asia.add(Tuberculosis | Asia = "99/1 95/5");
asia.add(LungCancer | Smoking = "99/1 90/10");
asia.add(Bronchitis | Smoking = "70/30 40/60");
asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
asia.add(XRay | Either = "95/5 2/98");
asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
// print
vector<string> pretty = {"Asia", "Dyspnea", "XRay", "Tuberculosis",
"Smoking", "Either", "LungCancer", "Bronchitis"};
auto formatter = [pretty](Key key) { return pretty[key]; };
asia.print("Asia", formatter);
// Convert to factor graph
DiscreteFactorGraph fg(asia);
// Create solver and eliminate
Ordering ordering;
ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);
// We can also build a Bayes tree (directed junction tree).
// The elimination order above will do fine:
auto bayesTree = fg.eliminateMultifrontal(ordering);
bayesTree->print("bayesTree", formatter);
// add evidence, we were in Asia and we have dyspnea
fg.add(Asia, "0 1");
fg.add(Dyspnea, "0 1");
// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
GTSAM_PRINT(*mpe2);
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal2->sample();
GTSAM_PRINT(*sample);
}
return 0;
}

View File

@ -15,105 +15,106 @@
* @author Abhijit
* @date Jun 4, 2012
*
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529]
* You may be familiar with other graphical model packages like BNT (available
* at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
* example. The following demo is same as that in the above link, except that
* everything is using GTSAM.
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009,
* p529] You may be familiar with other graphical model packages like BNT
* (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
* is used as an example. The following demo is the same as that in the above link,
* except that everything is using GTSAM.
*/
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteSequentialSolver.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <iomanip>
using namespace std;
using namespace gtsam;
int main(int argc, char **argv) {
// Define keys and a print function
Key C(1), S(2), R(3), W(4);
auto print = [=](DiscreteFactor::sharedValues values) {
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
<< " Sprinkler = " << static_cast<bool>((*values)[S])
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
};
// We assume binary state variables
// we have 0 == "False" and 1 == "True"
const size_t nrStates = 2;
// define variables
DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates),
WetGrass(4, nrStates);
DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates),
WetGrass(W, nrStates);
// create Factor Graph of the bayes net
DiscreteFactorGraph graph;
// add factors
graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
graph.add(Cloudy, "0.5 0.5"); // P(Cloudy)
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy)
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy)
graph.add(Sprinkler & Rain & WetGrass,
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain)
// Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
// factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
// Alternatively we can also create a DiscreteBayesNet, add
// DiscreteConditional factors and create a FactorGraph from it. (See
// testDiscreteBayesNet.cpp)
// Since this is a relatively small distribution, we can as well print
// the whole distribution..
cout << "Distribution of Example: " << endl;
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
<< endl;
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
<< endl;
for (size_t a = 0; a < nrStates; a++)
for (size_t m = 0; m < nrStates; m++)
for (size_t h = 0; h < nrStates; h++)
for (size_t c = 0; c < nrStates; c++) {
DiscreteFactor::Values values;
values[Cloudy.first] = c;
values[Sprinkler.first] = h;
values[Rain.first] = m;
values[WetGrass.first] = a;
values[C] = c;
values[S] = h;
values[R] = m;
values[W] = a;
double prodPot = graph(values);
cout << boolalpha << setw(8) << (bool) c << setw(14)
<< (bool) h << setw(12) << (bool) m << setw(13)
<< (bool) a << setw(16) << prodPot << endl;
cout << setw(8) << static_cast<bool>(c) << setw(14)
<< static_cast<bool>(h) << setw(12) << static_cast<bool>(m)
<< setw(13) << static_cast<bool>(a) << setw(16) << prodPot
<< endl;
}
// "Most Probable Explanation", i.e., configuration with largest value
DiscreteSequentialSolver solver(graph);
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
cout <<"\nMost Probable Explanation (MPE):" << endl;
cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first]
<< " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first]
<< " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first]
<< " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl;
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
cout << "\nMost Probable Explanation (MPE):" << endl;
print(mpe);
// "Inference" We show an inference query like: probability that the Sprinkler
// was on; given that the grass is wet i.e. P( S | C=0) = ?
// "Inference" We show an inference query like: probability that the Sprinkler was on;
// given that the grass is wet i.e. P( S | W=1) =?
cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl;
// add evidence that it is not Cloudy
graph.add(Cloudy, "1 0");
// Method 1: we can compute the joint marginal P(S,W) and from that we can compute
// P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
// solve again, now with evidence
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
//Step1: Compute P(S,W)
DiscreteFactorGraph jointFG;
jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices());
DecisionTreeFactor probSW = jointFG.product();
cout << "\nMPE given C=0:" << endl;
print(mpe_with_evidence);
//Step2: Compute P(W)
DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first);
//Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
DiscreteFactor::Values values;
values[WetGrass.first] = 1;
//print P(S=0|W=1)
values[Sprinkler.first] = 0;
cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl;
//print P(S=1|W=1)
values[Sprinkler.first] = 1;
cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl;
// TODO: Method 2 : One way is to modify the factor graph to
// incorporate the evidence node and compute the marginal
// TODO: graph.addEvidence(Cloudy,0);
// we can also calculate arbitrary marginals:
DiscreteMarginals marginals(graph);
cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
<< endl;
cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
<< endl;
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t i = 0; i < 10; i++) {
DiscreteFactor::sharedValues sample = chordal->sample();
print(sample);
}
return 0;
}

130
examples/FisheyeExample.cpp Normal file
View File

@ -0,0 +1,130 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file FisheyeExample.cpp
* @brief A visual SLAM example for the structure-from-motion problem on a
* simulated dataset. This version uses a fisheye camera model and a Gauss-Newton
* solver to solve the graph in one batch.
* @author ghaggin
* @date Apr 9, 2020
*/
/**
* A structure-from-motion example with landmarks
* - The landmarks form a 10 meter cube
* - The robot rotates around the landmarks, always facing towards the cube
*/
// For loading the data
#include "SFMdata.h"
// Camera observations of landmarks will be stored as Point2 (x, y).
#include <gtsam/geometry/Point2.h>
// Each variable in the system (poses and landmarks) must be identified with a
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
// (X1, X2, L1). Here we will use Symbols
#include <gtsam/inference/Symbol.h>
// Use GaussNewtonOptimizer to solve graph
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
// In GTSAM, measurement functions are represented as 'factors'. Several common
// factors have been provided with the library for solving robotics/SLAM/Bundle
// Adjustment problems. Here we will use Projection factors to model the
// camera's landmark observations. Also, we will initialize the robot at some
// location using a Prior factor.
#include <gtsam/geometry/Cal3Fisheye.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/ProjectionFactor.h>
#include <fstream>
#include <vector>
using namespace std;
using namespace gtsam;
using symbol_shorthand::L; // for landmarks
using symbol_shorthand::X; // for poses
/* ************************************************************************* */
int main(int argc, char *argv[]) {
// Define the camera calibration parameters
auto K = boost::make_shared<Cal3Fisheye>(
278.66, 278.48, 0.0, 319.75, 241.96, -0.013721808247486035,
0.020727425669427896, -0.012786476702685545, 0.0025242267320687625);
// Define the camera observation noise model, 1 pixel stddev
auto measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0);
// Create the set of ground-truth landmarks
const vector<Point3> points = createPoints();
// Create the set of ground-truth poses
const vector<Pose3> poses = createPoses();
// Create a Factor Graph and Values to hold the new data
NonlinearFactorGraph graph;
Values initialEstimate;
// Add a prior on pose x0, 0.1 rad on roll,pitch,yaw, and 30cm std on x,y,z
auto posePrior = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.1), Vector3::Constant(0.3)).finished());
graph.emplace_shared<PriorFactor<Pose3>>(X(0), poses[0], posePrior);
// Add a prior on landmark l0
auto pointPrior = noiseModel::Isotropic::Sigma(3, 0.1);
graph.emplace_shared<PriorFactor<Point3>>(L(0), points[0], pointPrior);
// Add initial guesses to all observed landmarks
// Intentionally initialize the variables off from the ground truth
static const Point3 kDeltaPoint(-0.25, 0.20, 0.15);
for (size_t j = 0; j < points.size(); ++j)
initialEstimate.insert<Point3>(L(j), points[j] + kDeltaPoint);
// Loop over the poses, adding the observations to the graph
for (size_t i = 0; i < poses.size(); ++i) {
// Add factors for each landmark observation
for (size_t j = 0; j < points.size(); ++j) {
PinholeCamera<Cal3Fisheye> camera(poses[i], *K);
Point2 measurement = camera.project(points[j]);
graph.emplace_shared<GenericProjectionFactor<Pose3, Point3, Cal3Fisheye>>(
measurement, measurementNoise, X(i), L(j), K);
}
// Add an initial guess for the current pose
// Intentionally initialize the variables off from the ground truth
static const Pose3 kDeltaPose(Rot3::Rodrigues(-0.1, 0.2, 0.25),
Point3(0.05, -0.10, 0.20));
initialEstimate.insert(X(i), poses[i] * kDeltaPose);
}
GaussNewtonParams params;
params.setVerbosity("TERMINATION");
params.maxIterations = 10000;
std::cout << "Optimizing the factor graph" << std::endl;
GaussNewtonOptimizer optimizer(graph, initialEstimate, params);
Values result = optimizer.optimize();
std::cout << "Optimization complete" << std::endl;
std::cout << "initial error=" << graph.error(initialEstimate) << std::endl;
std::cout << "final error=" << graph.error(result) << std::endl;
std::ofstream os("examples/vio_batch.dot");
graph.saveGraph(os, result);
return 0;
}
/* ************************************************************************* */

94
examples/HMMExample.cpp Normal file
View File

@ -0,0 +1,94 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HMMExample.cpp
* @brief Hidden Markov Model example, discrete.
* @author Frank Dellaert
* @date July 12, 2020
*/
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <gtsam/inference/BayesNet.h>
#include <iomanip>
#include <sstream>
using namespace std;
using namespace gtsam;
int main(int argc, char **argv) {
const int nrNodes = 4;
const size_t nrStates = 3;
// Define variables as well as ordering
Ordering ordering;
vector<DiscreteKey> keys;
for (int k = 0; k < nrNodes; k++) {
DiscreteKey key_i(k, nrStates);
keys.push_back(key_i);
ordering.emplace_back(k);
}
// Create HMM as a DiscreteBayesNet
DiscreteBayesNet hmm;
// Define backbone
const string transition = "8/1/1 1/8/1 1/1/8";
for (int k = 1; k < nrNodes; k++) {
hmm.add(keys[k] | keys[k - 1] = transition);
}
// Add some measurements, not needed for all time steps!
hmm.add(keys[0] % "7/2/1");
hmm.add(keys[1] % "1/9/0");
hmm.add(keys.back() % "5/4/1");
// print
hmm.print("HMM");
// Convert to factor graph
DiscreteFactorGraph factorGraph(hmm);
// Create solver and eliminate
// This will create a DAG ordered with arrow of time reversed
DiscreteBayesNet::shared_ptr chordal =
factorGraph.eliminateSequential(ordering);
chordal->print("Eliminated");
// solve
DiscreteFactor::sharedValues mpe = chordal->optimize();
GTSAM_PRINT(*mpe);
// We can also sample from it
cout << "\n10 samples:" << endl;
for (size_t k = 0; k < 10; k++) {
DiscreteFactor::sharedValues sample = chordal->sample();
GTSAM_PRINT(*sample);
}
// Or compute the marginals. This re-eliminates the FG into a Bayes tree
cout << "\nComputing Node Marginals .." << endl;
DiscreteMarginals marginals(factorGraph);
for (int k = 0; k < nrNodes; k++) {
Vector margProbs = marginals.marginalProbabilities(keys[k]);
stringstream ss;
ss << "marginal " << k;
print(margProbs, ss.str());
}
// TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
// joints efficiently, by the Bayes tree shortcut magic. All the code is there
// but it's not yet connected.
return 0;
}

View File

@ -0,0 +1,359 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file IMUKittiExampleGPS
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
*/
// GTSAM related includes.
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/GPSFactor.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/ISAM2Params.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Symbol.h>
#include <cstring>
#include <fstream>
#include <iostream>
using namespace std;
using namespace gtsam;
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
struct KittiCalibration {
double body_ptx;
double body_pty;
double body_ptz;
double body_prx;
double body_pry;
double body_prz;
double accelerometer_sigma;
double gyroscope_sigma;
double integration_sigma;
double accelerometer_bias_sigma;
double gyroscope_bias_sigma;
double average_delta_t;
};
struct ImuMeasurement {
double time;
double dt;
Vector3 accelerometer;
Vector3 gyroscope; // omega
};
struct GpsMeasurement {
double time;
Vector3 position; // x,y,z
};
const string output_filename = "IMUKittiExampleGPSResults.csv";
void loadKittiData(KittiCalibration& kitti_calibration,
vector<ImuMeasurement>& imu_measurements,
vector<GpsMeasurement>& gps_measurements) {
string line;
// Read IMU metadata and compute relative sensor pose transforms
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
ifstream imu_metadata(imu_metadata_file.c_str());
printf("-- Reading sensor metadata\n");
getline(imu_metadata, line, '\n'); // ignore the first line
// Load Kitti calibration
getline(imu_metadata, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
&kitti_calibration.body_ptx,
&kitti_calibration.body_pty,
&kitti_calibration.body_ptz,
&kitti_calibration.body_prx,
&kitti_calibration.body_pry,
&kitti_calibration.body_prz,
&kitti_calibration.accelerometer_sigma,
&kitti_calibration.gyroscope_sigma,
&kitti_calibration.integration_sigma,
&kitti_calibration.accelerometer_bias_sigma,
&kitti_calibration.gyroscope_bias_sigma,
&kitti_calibration.average_delta_t);
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
kitti_calibration.body_ptx,
kitti_calibration.body_pty,
kitti_calibration.body_ptz,
kitti_calibration.body_prx,
kitti_calibration.body_pry,
kitti_calibration.body_prz,
kitti_calibration.accelerometer_sigma,
kitti_calibration.gyroscope_sigma,
kitti_calibration.integration_sigma,
kitti_calibration.accelerometer_bias_sigma,
kitti_calibration.gyroscope_bias_sigma,
kitti_calibration.average_delta_t);
// Read IMU data
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
printf("-- Reading IMU measurements from file\n");
{
ifstream imu_data(imu_data_file.c_str());
getline(imu_data, line, '\n'); // ignore the first line
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
while (!imu_data.eof()) {
getline(imu_data, line, '\n');
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
&time, &dt,
&acc_x, &acc_y, &acc_z,
&gyro_x, &gyro_y, &gyro_z);
ImuMeasurement measurement;
measurement.time = time;
measurement.dt = dt;
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
imu_measurements.push_back(measurement);
}
}
// Read GPS data
// Time,X,Y,Z
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
printf("-- Reading GPS measurements from file\n");
{
ifstream gps_data(gps_data_file.c_str());
getline(gps_data, line, '\n'); // ignore the first line
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
while (!gps_data.eof()) {
getline(gps_data, line, '\n');
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
GpsMeasurement measurement;
measurement.time = time;
measurement.position = Vector3(gps_x, gps_y, gps_z);
gps_measurements.push_back(measurement);
}
}
}
int main(int argc, char* argv[]) {
KittiCalibration kitti_calibration;
vector<ImuMeasurement> imu_measurements;
vector<GpsMeasurement> gps_measurements;
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
.finished();
auto body_T_imu = Pose3::Expmap(BodyP);
if (!body_T_imu.equals(Pose3(), 1e-5)) {
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
exit(-1);
}
// Configure different variables
// double t_offset = gps_measurements[0].time;
size_t first_gps_pose = 1;
size_t gps_skip = 10; // Skip this many GPS measurements each time
double g = 9.8;
auto w_coriolis = Vector3::Zero(); // zero vector
// Configure noise models
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
Vector3::Constant(1.0/0.07))
.finished());
// Set initial conditions for the estimated trajectory
// initial pose is the reference frame (navigation frame)
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
// the vehicle is stationary at the beginning at position 0,0,0
Vector3 current_velocity_global = Vector3::Zero();
auto current_bias = imuBias::ConstantBias(); // init with zero bias
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
Vector3::Constant(1.0))
.finished());
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
Vector3::Constant(5.00e-05))
.finished());
// Set IMU preintegration parameters
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
// error committed in integrating position from velocities
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
imu_params->omegaCoriolis = w_coriolis;
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
// Set ISAM2 parameters and create ISAM2 solver object
ISAM2Params isam_params;
isam_params.factorization = ISAM2Params::CHOLESKY;
isam_params.relinearizeSkip = 10;
ISAM2 isam(isam_params);
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
NonlinearFactorGraph new_factors;
Values new_values; // values storing the initial estimates of new nodes in the factor graph
/// Main loop:
/// (1) we read the measurements
/// (2) we create the corresponding factors in the graph
/// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
size_t j = 0;
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
// At each non-IMU measurement we initialize a new node in the graph
auto current_pose_key = X(i);
auto current_vel_key = V(i);
auto current_bias_key = B(i);
double t = gps_measurements[i].time;
if (i == first_gps_pose) {
// Create initial estimate and prior on initial pose, velocity, and biases
new_values.insert(current_pose_key, current_pose_global);
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
} else {
double t_previous = gps_measurements[i-1].time;
// Summarize IMU data between the previous GPS measurement and now
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
static size_t included_imu_measurement_count = 0;
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
if (imu_measurements[j].time >= t_previous) {
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
imu_measurements[j].gyroscope,
imu_measurements[j].dt);
included_imu_measurement_count++;
}
j++;
}
// Create IMU factor
auto previous_pose_key = X(i-1);
auto previous_vel_key = V(i-1);
auto previous_bias_key = B(i-1);
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
current_pose_key, current_vel_key,
previous_bias_key, *current_summarized_measurement);
// Bias evolution as given in the IMU metadata
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
.finished());
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
current_bias_key,
imuBias::ConstantBias(),
sigma_between_b);
// Create GPS factor
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
if ((i % gps_skip) == 0) {
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
new_values.insert(current_pose_key, gps_pose);
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
gps_pose.translation().print();
printf("\n\n");
} else {
new_values.insert(current_pose_key, current_pose_global);
}
// Add initial values for velocity and bias based on the previous estimates
new_values.insert(current_vel_key, current_velocity_global);
new_values.insert(current_bias_key, current_bias);
// Update solver
// =======================================================================
// We accumulate 2*GPSskip GPS measurements before updating the solver at
// first so that the heading becomes observable.
if (i > (first_gps_pose + 2*gps_skip)) {
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
new_factors.print();
isam.update(new_factors, new_values);
// Reset the newFactors and newValues list
new_factors.resize(0);
new_values.clear();
// Extract the result/current estimates
Values result = isam.calculateEstimate();
current_pose_global = result.at<Pose3>(current_pose_key);
current_velocity_global = result.at<Vector3>(current_vel_key);
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
printf("\n################ POSE AT TIME %lf ################\n", t);
current_pose_global.print();
printf("\n\n");
}
}
}
// Save results to file
printf("\nWriting results to file...\n");
FILE* fp_out = fopen(output_filename.c_str(), "w+");
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
Values result = isam.calculateEstimate();
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
auto pose_key = X(i);
auto vel_key = V(i);
auto bias_key = B(i);
auto pose = result.at<Pose3>(pose_key);
auto velocity = result.at<Vector3>(vel_key);
auto bias = result.at<imuBias::ConstantBias>(bias_key);
auto pose_quat = pose.rotation().toQuaternion();
auto gps = gps_measurements[i].position;
cout << "State at #" << i << endl;
cout << "Pose:" << endl << pose << endl;
cout << "Velocity:" << endl << velocity << endl;
cout << "Bias:" << endl << bias << endl;
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
gps_measurements[i].time,
pose.x(), pose.y(), pose.z(),
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
gps(0), gps(1), gps(2));
}
fclose(fp_out);
}

View File

@ -4,7 +4,8 @@
* @author Alexander (pumaking on BitBucket)
*/
#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>
@ -27,7 +28,7 @@ int main(int argc, char* argv[]) {
noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
Vector6 sigmas;
sigmas << Vector3::Constant(0.3), Vector3::Constant(0.1);
sigmas << Vector3::Constant(0.1), Vector3::Constant(0.3);
auto noise = noiseModel::Diagonal::Sigmas(sigmas);
ISAM2Params parameters;
@ -56,7 +57,7 @@ int main(int argc, char* argv[]) {
vector<Pose3> poses = {pose1, pose2, pose3, pose4, pose5};
// Add first pose
graph.emplace_shared<PriorFactor<Pose3>>(X(0), poses[0], noise);
graph.addPrior(X(0), poses[0], noise);
initialEstimate.insert(X(0), poses[0].compose(delta));
// Create smart factor with measurement from first pose only
@ -70,7 +71,7 @@ int main(int argc, char* argv[]) {
cout << "i = " << i << endl;
// Add prior on new pose
graph.emplace_shared<PriorFactor<Pose3>>(X(i), poses[i], noise);
graph.addPrior(X(i), poses[i], noise);
initialEstimate.insert(X(i), poses[i].compose(delta));
// "Simulate" measurement from this pose

View File

@ -129,18 +129,16 @@ int main(int argc, char* argv[]) {
// Pose prior - at identity
auto priorPoseNoise = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.1), Vector3::Constant(0.1)).finished());
graph.emplace_shared<PriorFactor<Pose3>>(X(1), Pose3::identity(),
priorPoseNoise);
graph.addPrior(X(1), Pose3::identity(), priorPoseNoise);
initialEstimate.insert(X(0), Pose3::identity());
// Bias prior
graph.add(PriorFactor<imuBias::ConstantBias>(B(1), imu.priorImuBias,
imu.biasNoiseModel));
graph.addPrior(B(1), imu.priorImuBias,
imu.biasNoiseModel);
initialEstimate.insert(B(0), imu.priorImuBias);
// Velocity prior - assume stationary
graph.add(
PriorFactor<Vector3>(V(1), Vector3(0, 0, 0), imu.velocityNoiseModel));
graph.addPrior(V(1), Vector3(0, 0, 0), imu.velocityNoiseModel);
initialEstimate.insert(V(0), Vector3(0, 0, 0));
int lastFrame = 1;

View File

@ -1,12 +1,12 @@
#include <gtsam/geometry/SimpleCamera.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/navigation/Scenario.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <vector>
@ -34,7 +34,7 @@ int main(int argc, char* argv[]) {
double radius = 30;
const Point3 up(0, 0, 1), target(0, 0, 0);
const Point3 position(radius, 0, 0);
const SimpleCamera camera = SimpleCamera::Lookat(position, target, up);
const auto camera = PinholeCamera<Cal3_S2>::Lookat(position, target, up);
const auto pose_0 = camera.pose();
// Now, create a constant-twist scenario that makes the camera orbit the
@ -60,21 +60,18 @@ int main(int argc, char* argv[]) {
// 0.1 rad std on roll, pitch, yaw, 30cm std on x,y,z.
auto noise = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.1), Vector3::Constant(0.3)).finished());
newgraph.push_back(PriorFactor<Pose3>(X(0), pose_0, noise));
newgraph.addPrior(X(0), pose_0, noise);
// Add imu priors
Key biasKey = Symbol('b', 0);
auto biasnoise = noiseModel::Diagonal::Sigmas(Vector6::Constant(0.1));
PriorFactor<imuBias::ConstantBias> biasprior(biasKey, imuBias::ConstantBias(),
biasnoise);
newgraph.push_back(biasprior);
newgraph.addPrior(biasKey, imuBias::ConstantBias(), biasnoise);
initialEstimate.insert(biasKey, imuBias::ConstantBias());
auto velnoise = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.1));
Vector n_velocity(3);
n_velocity << 0, angular_velocity * radius, 0;
PriorFactor<Vector> velprior(V(0), n_velocity, velnoise);
newgraph.push_back(velprior);
newgraph.addPrior(V(0), n_velocity, velnoise);
initialEstimate.insert(V(0), n_velocity);

View File

@ -11,14 +11,16 @@
/**
* @file imuFactorsExample
* @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor navigation code.
* @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor
* navigation code.
* @author Garrett (ghemann@gmail.com), Luca Carlone
*/
/**
* Example of use of the imuFactors (imuFactor and combinedImuFactor) in conjunction with GPS
* - you can test imuFactor (resp. combinedImuFactor) by commenting (resp. uncommenting)
* the line #define USE_COMBINED (few lines below)
* Example of use of the imuFactors (imuFactor and combinedImuFactor) in
* conjunction with GPS
* - imuFactor is used by default. You can test combinedImuFactor by
* appending a `-c` flag at the end (see below for example command).
* - we read IMU and GPS data from a CSV file, with the following format:
* A row starting with "i" is the initial state, formatted as
* N, E, D, qX, qY, qZ, qW, velN, velE, velD
@ -26,52 +28,84 @@
* linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
* A row starting with "1" is a GPS correction formatted with
* N, E, D, qX, qY, qZ, qW
* Note that for GPS correction, we're only using the position not the rotation. The
* rotation is provided in the file for ground truth comparison.
* Note that for GPS correction, we're only using the position, not the
* rotation. The rotation is provided in the file for ground truth comparison.
*
* Usage: ./ImuFactorsExample [data_csv_path] [-c]
* optional arguments:
* data_csv_path path to the CSV file with the IMU data.
* -c use CombinedImuFactor
* Note: Define USE_LM to use Levenberg Marquardt Optimizer
* By default ISAM2 is used
*/
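To make the row layout described above concrete, the short sketch below writes a file in that format; the file name and every numeric value are invented purely for illustration and come from no shipped dataset:

#include <fstream>

int main() {
  std::ofstream csv("toy_imuAndGPSdata.csv");  // hypothetical file name
  // Initial-state row: i, N, E, D, qX, qY, qZ, qW, velN, velE, velD
  csv << "i,0,0,0,0,0,0,1,0,0,0\n";
  // IMU row: 0, linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
  csv << "0,0.01,-0.02,0.03,0.001,0.002,0.003\n";
  // GPS row: 1, N, E, D, qX, qY, qZ, qW
  csv << "1,0.5,0.1,-0.2,0,0,0,1\n";
  return 0;
}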
// GTSAM related includes.
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/GPSFactor.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/inference/Symbol.h>
#include <cstring>
#include <fstream>
#include <iostream>
// Uncomment line below to use the CombinedIMUFactor as opposed to the standard ImuFactor.
// #define USE_COMBINED
// Uncomment the following to use Levenberg Marquardt Optimizer
// #define USE_LM
using namespace gtsam;
using namespace std;
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
const string output_filename = "imuFactorExampleResults.csv";
static const char output_filename[] = "imuFactorExampleResults.csv";
static const char use_combined_imu_flag[3] = "-c";
// This will either be PreintegratedImuMeasurements (for ImuFactor) or
// PreintegratedCombinedMeasurements (for CombinedImuFactor).
PreintegrationType *imu_preintegrated_;
int main(int argc, char* argv[])
{
int main(int argc, char* argv[]) {
string data_filename;
bool use_combined_imu = false;
#ifndef USE_LM
printf("Using ISAM2\n");
ISAM2Params parameters;
parameters.relinearizeThreshold = 0.01;
parameters.relinearizeSkip = 1;
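// (relinearizeThreshold: only variables whose linear delta exceeds this are
// relinearized; relinearizeSkip: relinearization is only considered every
// N-th call to update().)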
ISAM2 isam2(parameters);
#else
printf("Using Levenberg Marquardt Optimizer\n");
#endif
if (argc < 2) {
printf("using default CSV file\n");
data_filename = findExampleDataFile("imuAndGPSdata.csv");
} else if (argc < 3) {
if (strcmp(argv[1], use_combined_imu_flag) == 0) {
printf("using CombinedImuFactor\n");
use_combined_imu = true;
printf("using default CSV file\n");
data_filename = findExampleDataFile("imuAndGPSdata.csv");
} else {
data_filename = argv[1];
}
} else {
data_filename = argv[1];
if (strcmp(argv[2], use_combined_imu_flag) == 0) {
printf("using CombinedImuFactor\n");
use_combined_imu = true;
}
}
// Set up output file for plotting errors
FILE* fp_out = fopen(output_filename.c_str(), "w+");
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx,gt_qy,gt_qz,gt_qw\n");
FILE* fp_out = fopen(output_filename, "w+");
fprintf(fp_out,
"#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx,"
"gt_qy,gt_qz,gt_qw\n");
// Begin parsing the CSV file. Input the first line for initialization.
// From there, we'll iterate through the file and we'll preintegrate the IMU
@ -80,9 +114,9 @@ int main(int argc, char* argv[])
string value;
// Format is (N,E,D,qX,qY,qZ,qW,velN,velE,velD)
Eigen::Matrix<double,10,1> initial_state = Eigen::Matrix<double,10,1>::Zero();
getline(file, value, ','); // i
for (int i=0; i<9; i++) {
Vector10 initial_state;
getline(file, value, ','); // i
for (int i = 0; i < 9; i++) {
getline(file, value, ',');
initial_state(i) = atof(value.c_str());
}
@ -90,13 +124,14 @@ int main(int argc, char* argv[])
initial_state(9) = atof(value.c_str());
cout << "initial state:\n" << initial_state.transpose() << "\n\n";
// Assemble initial quaternion through gtsam constructor ::quaternion(w,x,y,z);
// Assemble initial quaternion through GTSAM constructor
// ::quaternion(w,x,y,z);
Rot3 prior_rotation = Rot3::Quaternion(initial_state(6), initial_state(3),
initial_state(4), initial_state(5));
Point3 prior_point(initial_state.head<3>());
Pose3 prior_pose(prior_rotation, prior_point);
Vector3 prior_velocity(initial_state.tail<3>());
imuBias::ConstantBias prior_imu_bias; // assume zero initial bias
imuBias::ConstantBias prior_imu_bias; // assume zero initial bias
Values initial_values;
int correction_count = 0;
@ -104,53 +139,64 @@ int main(int argc, char* argv[])
initial_values.insert(V(correction_count), prior_velocity);
initial_values.insert(B(correction_count), prior_imu_bias);
// Assemble prior noise model and add it to the graph.
noiseModel::Diagonal::shared_ptr pose_noise_model = noiseModel::Diagonal::Sigmas((Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5).finished()); // rad,rad,rad,m, m, m
noiseModel::Diagonal::shared_ptr velocity_noise_model = noiseModel::Isotropic::Sigma(3,0.1); // m/s
noiseModel::Diagonal::shared_ptr bias_noise_model = noiseModel::Isotropic::Sigma(6,1e-3);
// Assemble prior noise model and add it to the graph.
auto pose_noise_model = noiseModel::Diagonal::Sigmas(
(Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5)
.finished()); // rad,rad,rad,m, m, m
auto velocity_noise_model = noiseModel::Isotropic::Sigma(3, 0.1); // m/s
auto bias_noise_model = noiseModel::Isotropic::Sigma(6, 1e-3);
// Add all prior factors (pose, velocity, bias) to the graph.
NonlinearFactorGraph *graph = new NonlinearFactorGraph();
graph->add(PriorFactor<Pose3>(X(correction_count), prior_pose, pose_noise_model));
graph->add(PriorFactor<Vector3>(V(correction_count), prior_velocity,velocity_noise_model));
graph->add(PriorFactor<imuBias::ConstantBias>(B(correction_count), prior_imu_bias,bias_noise_model));
NonlinearFactorGraph* graph = new NonlinearFactorGraph();
graph->addPrior(X(correction_count), prior_pose, pose_noise_model);
graph->addPrior(V(correction_count), prior_velocity, velocity_noise_model);
graph->addPrior(B(correction_count), prior_imu_bias, bias_noise_model);
// We use the sensor specs to build the noise model for the IMU factor.
double accel_noise_sigma = 0.0003924;
double gyro_noise_sigma = 0.000205689024915;
double accel_bias_rw_sigma = 0.004905;
double gyro_bias_rw_sigma = 0.000001454441043;
Matrix33 measured_acc_cov = Matrix33::Identity(3,3) * pow(accel_noise_sigma,2);
Matrix33 measured_omega_cov = Matrix33::Identity(3,3) * pow(gyro_noise_sigma,2);
Matrix33 integration_error_cov = Matrix33::Identity(3,3)*1e-8; // error committed in integrating position from velocities
Matrix33 bias_acc_cov = Matrix33::Identity(3,3) * pow(accel_bias_rw_sigma,2);
Matrix33 bias_omega_cov = Matrix33::Identity(3,3) * pow(gyro_bias_rw_sigma,2);
Matrix66 bias_acc_omega_int = Matrix::Identity(6,6)*1e-5; // error in the bias used for preintegration
Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2);
Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2);
Matrix33 integration_error_cov =
I_3x3 * 1e-8; // error committed in integrating position from velocities
Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
Matrix66 bias_acc_omega_int =
I_6x6 * 1e-5; // error in the bias used for preintegration
boost::shared_ptr<PreintegratedCombinedMeasurements::Params> p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
// PreintegrationBase params:
p->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
p->integrationCovariance = integration_error_cov; // integration uncertainty continuous
p->accelerometerCovariance =
measured_acc_cov; // acc white noise in continuous
p->integrationCovariance =
integration_error_cov; // integration uncertainty continuous
// should be using 2nd order integration
// PreintegratedRotation params:
p->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
p->gyroscopeCovariance =
measured_omega_cov; // gyro white noise in continuous
// PreintegrationCombinedMeasurements params:
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
p->biasAccOmegaInt = bias_acc_omega_int;
#ifdef USE_COMBINED
imu_preintegrated_ = new PreintegratedCombinedMeasurements(p, prior_imu_bias);
#else
imu_preintegrated_ = new PreintegratedImuMeasurements(p, prior_imu_bias);
#endif
std::shared_ptr<PreintegrationType> preintegrated = nullptr;
if (use_combined_imu) {
preintegrated =
std::make_shared<PreintegratedCombinedMeasurements>(p, prior_imu_bias);
} else {
preintegrated =
std::make_shared<PreintegratedImuMeasurements>(p, prior_imu_bias);
}
assert(preintegrated);
// Store previous state for the imu integration and the latest predicted outcome.
// Store previous state for imu integration and latest predicted outcome.
NavState prev_state(prior_pose, prior_velocity);
NavState prop_state = prev_state;
imuBias::ConstantBias prev_bias = prior_imu_bias;
// Keep track of the total error over the entire run for a simple performance metric.
// Keep track of total error over the entire run as simple performance metric.
double current_position_error = 0.0, current_orientation_error = 0.0;
double output_time = 0.0;
@ -159,14 +205,13 @@ int main(int argc, char* argv[])
// All priors have been set up, now iterate through the data file.
while (file.good()) {
// Parse out first value
getline(file, value, ',');
int type = atoi(value.c_str());
if (type == 0) { // IMU measurement
Eigen::Matrix<double,6,1> imu = Eigen::Matrix<double,6,1>::Zero();
for (int i=0; i<5; ++i) {
if (type == 0) { // IMU measurement
Vector6 imu;
for (int i = 0; i < 5; ++i) {
getline(file, value, ',');
imu(i) = atof(value.c_str());
}
@ -174,11 +219,11 @@ int main(int argc, char* argv[])
imu(5) = atof(value.c_str());
// Adding the IMU preintegration.
imu_preintegrated_->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt);
preintegrated->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt);
} else if (type == 1) { // GPS measurement
Eigen::Matrix<double,7,1> gps = Eigen::Matrix<double,7,1>::Zero();
for (int i=0; i<6; ++i) {
} else if (type == 1) { // GPS measurement
Vector7 gps;
for (int i = 0; i < 6; ++i) {
getline(file, value, ',');
gps(i) = atof(value.c_str());
}
@ -188,50 +233,62 @@ int main(int argc, char* argv[])
correction_count++;
// Adding IMU factor and GPS factor and optimizing.
#ifdef USE_COMBINED
PreintegratedCombinedMeasurements *preint_imu_combined = dynamic_cast<PreintegratedCombinedMeasurements*>(imu_preintegrated_);
CombinedImuFactor imu_factor(X(correction_count-1), V(correction_count-1),
X(correction_count ), V(correction_count ),
B(correction_count-1), B(correction_count ),
*preint_imu_combined);
graph->add(imu_factor);
#else
PreintegratedImuMeasurements *preint_imu = dynamic_cast<PreintegratedImuMeasurements*>(imu_preintegrated_);
ImuFactor imu_factor(X(correction_count-1), V(correction_count-1),
X(correction_count ), V(correction_count ),
B(correction_count-1),
*preint_imu);
graph->add(imu_factor);
imuBias::ConstantBias zero_bias(Vector3(0, 0, 0), Vector3(0, 0, 0));
graph->add(BetweenFactor<imuBias::ConstantBias>(B(correction_count-1),
B(correction_count ),
zero_bias, bias_noise_model));
#endif
if (use_combined_imu) {
auto preint_imu_combined =
dynamic_cast<const PreintegratedCombinedMeasurements&>(
*preintegrated);
CombinedImuFactor imu_factor(
X(correction_count - 1), V(correction_count - 1),
X(correction_count), V(correction_count), B(correction_count - 1),
B(correction_count), preint_imu_combined);
graph->add(imu_factor);
} else {
auto preint_imu =
dynamic_cast<const PreintegratedImuMeasurements&>(*preintegrated);
ImuFactor imu_factor(X(correction_count - 1), V(correction_count - 1),
X(correction_count), V(correction_count),
B(correction_count - 1), preint_imu);
graph->add(imu_factor);
imuBias::ConstantBias zero_bias(Vector3(0, 0, 0), Vector3(0, 0, 0));
graph->add(BetweenFactor<imuBias::ConstantBias>(
B(correction_count - 1), B(correction_count), zero_bias,
bias_noise_model));
}
noiseModel::Diagonal::shared_ptr correction_noise = noiseModel::Isotropic::Sigma(3,1.0);
auto correction_noise = noiseModel::Isotropic::Sigma(3, 1.0);
GPSFactor gps_factor(X(correction_count),
Point3(gps(0), // N,
gps(1), // E,
gps(2)), // D,
Point3(gps(0), // N,
gps(1), // E,
gps(2)), // D,
correction_noise);
graph->add(gps_factor);
// Now optimize and compare results.
prop_state = imu_preintegrated_->predict(prev_state, prev_bias);
prop_state = preintegrated->predict(prev_state, prev_bias);
initial_values.insert(X(correction_count), prop_state.pose());
initial_values.insert(V(correction_count), prop_state.v());
initial_values.insert(B(correction_count), prev_bias);
Values result;
#ifdef USE_LM
LevenbergMarquardtOptimizer optimizer(*graph, initial_values);
Values result = optimizer.optimize();
result = optimizer.optimize();
#else
isam2.update(*graph, initial_values);
isam2.update();
result = isam2.calculateEstimate();
// reset the graph
graph->resize(0);
initial_values.clear();
#endif
// Overwrite the beginning of the preintegration for the next step.
prev_state = NavState(result.at<Pose3>(X(correction_count)),
result.at<Vector3>(V(correction_count)));
prev_bias = result.at<imuBias::ConstantBias>(B(correction_count));
// Reset the preintegration object.
imu_preintegrated_->resetIntegrationAndSetBias(prev_bias);
preintegrated->resetIntegrationAndSetBias(prev_bias);
// Print out the position and orientation error for comparison.
Vector3 gtsam_position = prev_state.pose().translation();
@ -242,19 +299,19 @@ int main(int argc, char* argv[])
Quaternion gps_quat(gps(6), gps(3), gps(4), gps(5));
Quaternion quat_error = gtsam_quat * gps_quat.inverse();
quat_error.normalize();
Vector3 euler_angle_error(quat_error.x()*2,
quat_error.y()*2,
quat_error.z()*2);
Vector3 euler_angle_error(quat_error.x() * 2, quat_error.y() * 2,
quat_error.z() * 2);
current_orientation_error = euler_angle_error.norm();
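// (For a small rotation error, the quaternion is approximately
// [1, rx/2, ry/2, rz/2], so doubling its vector part recovers the
// rotation-vector error whose norm is taken above.)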
// display statistics
cout << "Position error:" << current_position_error << "\t " << "Angular error:" << current_orientation_error << "\n";
cout << "Position error:" << current_position_error << "\t "
<< "Angular error:" << current_orientation_error << "\n";
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
output_time, gtsam_position(0), gtsam_position(1), gtsam_position(2),
gtsam_quat.x(), gtsam_quat.y(), gtsam_quat.z(), gtsam_quat.w(),
gps(0), gps(1), gps(2),
gps_quat.x(), gps_quat.y(), gps_quat.z(), gps_quat.w());
output_time, gtsam_position(0), gtsam_position(1),
gtsam_position(2), gtsam_quat.x(), gtsam_quat.y(), gtsam_quat.z(),
gtsam_quat.w(), gps(0), gps(1), gps(2), gps_quat.x(),
gps_quat.y(), gps_quat.z(), gps_quat.w());
output_time += 1.0;
@ -264,6 +321,7 @@ int main(int argc, char* argv[])
}
}
fclose(fp_out);
cout << "Complete, results written to " << output_filename << "\n\n";;
cout << "Complete, results written to " << output_filename << "\n\n";
return 0;
}


@ -22,7 +22,6 @@
#include <gtsam/nonlinear/Marginals.h>
#include <gtsam/nonlinear/expressions.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/expressions.h>
#include <cmath>


@ -66,12 +66,11 @@ using namespace gtsam;
#include <gtsam/nonlinear/NonlinearFactor.h>
class UnaryFactor: public NoiseModelFactor1<Pose2> {
// The factor will hold a measurement consisting of an (X,Y) location
// We could do this with a Point2, but here we just use two doubles
double mx_, my_;
public:
public:
/// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<UnaryFactor> shared_ptr;
@ -85,15 +84,15 @@ public:
// The first is the 'evaluateError' function. This function implements the desired measurement
// function, returning a vector of errors when evaluated at the provided variable value. It
// must also calculate the Jacobians for this measurement function, if requested.
Vector evaluateError(const Pose2& q, boost::optional<Matrix&> H = boost::none) const
{
Vector evaluateError(const Pose2& q,
boost::optional<Matrix&> H = boost::none) const {
// The measurement function for a GPS-like measurement is simple:
// error_x = pose.x - measurement.x
// error_y = pose.y - measurement.y
// Consequently, the Jacobians are:
// [ derror_x/dx derror_x/dy derror_x/dtheta ] = [1 0 0]
// [ derror_y/dx derror_y/dy derror_y/dtheta ] = [0 1 0]
if (H) (*H) = (Matrix(2,3) << 1.0,0.0,0.0, 0.0,1.0,0.0).finished();
if (H) (*H) = (Matrix(2, 3) << 1.0, 0.0, 0.0, 0.0, 1.0, 0.0).finished();
return (Vector(2) << q.x() - mx_, q.y() - my_).finished();
}
@ -107,29 +106,28 @@ public:
// Additionally, we encourage you to unit test your custom factors
// (as all GTSAM factors are), for which you would need equals and print methods
// to satisfy the GTSAM_CONCEPT_TESTABLE_INST(T) defined in Testable.h, but these are not needed below.
}; // UnaryFactor
}; // UnaryFactor
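As the comment above suggests, custom factors are easiest to trust once unit tested. A minimal sketch in the CppUnitLite style that GTSAM's own tests use, written as if UnaryFactor lived in a header visible to a separate test file (here it is defined inside this example, so the snippet is illustrative only); the expected error is worked out by hand from evaluateError:

#include <CppUnitLite/TestHarness.h>

using namespace gtsam;

TEST(UnaryFactor, Error) {
  auto noise = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1));
  UnaryFactor factor(1, 2.0, 0.0, noise);  // "GPS" measurement at (2, 0)
  Pose2 q(2.3, 0.1, -0.2);
  // error = (q.x() - mx, q.y() - my) = (0.3, 0.1)
  Vector expected = (Vector(2) << 0.3, 0.1).finished();
  EXPECT(assert_equal(expected, factor.evaluateError(q), 1e-9));
}

int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}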
int main(int argc, char** argv) {
// 1. Create a factor graph container and add factors to it
NonlinearFactorGraph graph;
// 2a. Add odometry factors
// For simplicity, we will use the same noise model for each odometry factor
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// Create odometry (Between) factors between consecutive poses
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise);
graph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(2.0, 0.0, 0.0), odometryNoise);
// 2b. Add "GPS-like" measurements
// We will use our custom UnaryFactor for this.
noiseModel::Diagonal::shared_ptr unaryNoise = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
auto unaryNoise =
noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
graph.emplace_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise);
graph.print("\nFactor Graph:\n"); // print
graph.print("\nFactor Graph:\n"); // print
// 3. Create the data structure to hold the initial estimate of the solution
// For illustrative purposes, these have been deliberately set to incorrect values
@ -137,7 +135,7 @@ int main(int argc, char** argv) {
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2));
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2));
initialEstimate.insert(3, Pose2(4.1, 0.1, 0.1));
initialEstimate.print("\nInitial Estimate:\n"); // print
initialEstimate.print("\nInitial Estimate:\n"); // print
// 4. Optimize using Levenberg-Marquardt optimization. The optimizer
// accepts an optional set of configuration parameters, controlling


@ -11,7 +11,8 @@
/**
* @file METISOrdering.cpp
* @brief Simple robot motion example, with prior and two odometry measurements, using a METIS ordering
* @brief Simple robot motion example, with prior and two odometry measurements,
* using a METIS ordering
* @author Frank Dellaert
* @author Andrew Melim
*/
@ -22,12 +23,11 @@
*/
#include <gtsam/geometry/Pose2.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/Marginals.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/BetweenFactor.h>
using namespace std;
using namespace gtsam;
@ -35,26 +35,26 @@ using namespace gtsam;
int main(int argc, char** argv) {
NonlinearFactorGraph graph;
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.emplace_shared<PriorFactor<Pose2> >(1, priorMean, priorNoise);
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, priorMean, priorNoise);
Pose2 odometry(2.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, odometry, odometryNoise);
graph.emplace_shared<BetweenFactor<Pose2> >(2, 3, odometry, odometryNoise);
graph.print("\nFactor Graph:\n"); // print
graph.print("\nFactor Graph:\n"); // print
Values initial;
initial.insert(1, Pose2(0.5, 0.0, 0.2));
initial.insert(2, Pose2(2.3, 0.1, -0.2));
initial.insert(3, Pose2(4.1, 0.1, 0.1));
initial.print("\nInitial Estimate:\n"); // print
initial.print("\nInitial Estimate:\n"); // print
// optimize using Levenberg-Marquardt optimization
LevenbergMarquardtParams params;
// In order to specify the ordering type, we need to set the NonlinearOptimizerParameter "orderingType"
// By default this parameter is set to OrderingType::COLAMD
// In order to specify the ordering type, we need to set the "orderingType". By
// default this parameter is set to OrderingType::COLAMD
params.orderingType = Ordering::METIS;
LevenbergMarquardtOptimizer optimizer(graph, initial, params);
Values result = optimizer.optimize();


@ -29,7 +29,6 @@
// have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
// Here we will use Between factors for the relative motion described by odometry measurements.
// Also, we will initialize the robot at the origin using a Prior factor.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
// When the factors are created, we will add them to a Factor Graph. As the factors we are using
@ -57,24 +56,23 @@ using namespace std;
using namespace gtsam;
int main(int argc, char** argv) {
// Create an empty nonlinear factor graph
NonlinearFactorGraph graph;
// Add a prior on the first pose, setting it to the origin
// A prior factor consists of a mean and a noise model (covariance matrix)
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.emplace_shared<PriorFactor<Pose2> >(1, priorMean, priorNoise);
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, priorMean, priorNoise);
// Add odometry factors
Pose2 odometry(2.0, 0.0, 0.0);
// For simplicity, we will use the same noise model for each odometry factor
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// Create odometry (Between) factors between consecutive poses
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, odometry, odometryNoise);
graph.emplace_shared<BetweenFactor<Pose2> >(2, 3, odometry, odometryNoise);
graph.print("\nFactor Graph:\n"); // print
graph.print("\nFactor Graph:\n"); // print
// Create the data structure to hold the initial estimate of the solution
// For illustrative purposes, these have been deliberately set to incorrect values
@ -82,7 +80,7 @@ int main(int argc, char** argv) {
initial.insert(1, Pose2(0.5, 0.0, 0.2));
initial.insert(2, Pose2(2.3, 0.1, -0.2));
initial.insert(3, Pose2(4.1, 0.1, 0.1));
initial.print("\nInitial Estimate:\n"); // print
initial.print("\nInitial Estimate:\n"); // print
// optimize using Levenberg-Marquardt optimization
Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();


@ -40,7 +40,6 @@
// Here we will use a RangeBearing factor for the range-bearing measurements to identified
// landmarks, and Between factors for the relative motion described by odometry measurements.
// Also, we will initialize the robot at the origin using a Prior factor.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/sam/BearingRangeFactor.h>
@ -70,35 +69,36 @@ using namespace std;
using namespace gtsam;
int main(int argc, char** argv) {
// Create a factor graph
NonlinearFactorGraph graph;
// Create the keys we need for this simple example
static Symbol x1('x',1), x2('x',2), x3('x',3);
static Symbol l1('l',1), l2('l',2);
static Symbol x1('x', 1), x2('x', 2), x3('x', 3);
static Symbol l1('l', 1), l2('l', 2);
// Add a prior on pose x1 at the origin. A prior factor consists of a mean and a noise model (covariance matrix)
Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
graph.emplace_shared<PriorFactor<Pose2> >(x1, prior, priorNoise); // add directly to graph
// Add a prior on pose x1 at the origin. A prior factor consists of a mean and
// a noise model (covariance matrix)
Pose2 prior(0.0, 0.0, 0.0); // prior mean is at origin
auto priorNoise = noiseModel::Diagonal::Sigmas(
Vector3(0.3, 0.3, 0.1)); // 30cm std on x,y, 0.1 rad on theta
graph.addPrior(x1, prior, priorNoise); // add directly to graph
// Add two odometry factors
Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
noiseModel::Diagonal::shared_ptr odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); // 20cm std on x,y, 0.1 rad on theta
Pose2 odometry(2.0, 0.0, 0.0);
// create a measurement for both factors (the same in this case)
auto odometryNoise = noiseModel::Diagonal::Sigmas(
Vector3(0.2, 0.2, 0.1)); // 20cm std on x,y, 0.1 rad on theta
graph.emplace_shared<BetweenFactor<Pose2> >(x1, x2, odometry, odometryNoise);
graph.emplace_shared<BetweenFactor<Pose2> >(x2, x3, odometry, odometryNoise);
// Add Range-Bearing measurements to two different landmarks
// create a noise model for the landmark measurements
noiseModel::Diagonal::shared_ptr measurementNoise = noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.2)); // 0.1 rad std on bearing, 20cm on range
auto measurementNoise = noiseModel::Diagonal::Sigmas(
Vector2(0.1, 0.2)); // 0.1 rad std on bearing, 20cm on range
// create the measurement values - indices are (pose id, landmark id)
Rot2 bearing11 = Rot2::fromDegrees(45),
bearing21 = Rot2::fromDegrees(90),
Rot2 bearing11 = Rot2::fromDegrees(45), bearing21 = Rot2::fromDegrees(90),
bearing32 = Rot2::fromDegrees(90);
double range11 = std::sqrt(4.0+4.0),
range21 = 2.0,
range32 = 2.0;
double range11 = std::sqrt(4.0 + 4.0), range21 = 2.0, range32 = 2.0;
// Add Bearing-Range factors
graph.emplace_shared<BearingRangeFactor<Pose2, Point2> >(x1, l1, bearing11, range11, measurementNoise);
@ -111,7 +111,7 @@ int main(int argc, char** argv) {
// Create (deliberately inaccurate) initial estimate
Values initialEstimate;
initialEstimate.insert(x1, Pose2(0.5, 0.0, 0.2));
initialEstimate.insert(x2, Pose2(2.3, 0.1,-0.2));
initialEstimate.insert(x2, Pose2(2.3, 0.1, -0.2));
initialEstimate.insert(x3, Pose2(4.1, 0.1, 0.1));
initialEstimate.insert(l1, Point2(1.8, 2.1));
initialEstimate.insert(l2, Point2(4.1, 1.8));
@ -139,4 +139,3 @@ int main(int argc, char** argv) {
return 0;
}


@ -36,7 +36,6 @@
// Here we will use Between factors for the relative motion described by odometry measurements.
// We will also use a Between Factor to encode the loop closure constraint
// Also, we will initialize the robot at the origin using a Prior factor.
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
// When the factors are created, we will add them to a Factor Graph. As the factors we are using
@ -65,21 +64,20 @@ using namespace std;
using namespace gtsam;
int main(int argc, char** argv) {
// 1. Create a factor graph container and add factors to it
NonlinearFactorGraph graph;
// 2a. Add a prior on the first pose, setting it to the origin
// A prior factor consists of a mean and a noise model (covariance matrix)
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.emplace_shared<PriorFactor<Pose2> >(1, Pose2(0, 0, 0), priorNoise);
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, Pose2(0, 0, 0), priorNoise);
// For simplicity, we will use the same noise model for odometry and loop closures
noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// 2b. Add odometry factors
// Create odometry (Between) factors between consecutive poses
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(2, 0, 0 ), model);
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(2, 0, 0), model);
graph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(2, 0, M_PI_2), model);
graph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(2, 0, M_PI_2), model);
graph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(2, 0, M_PI_2), model);
@ -89,17 +87,17 @@ int main(int argc, char** argv) {
// these constraints may be identified in many ways, such as appearance-based techniques
// with camera images. We will use another Between Factor to enforce this constraint:
graph.emplace_shared<BetweenFactor<Pose2> >(5, 2, Pose2(2, 0, M_PI_2), model);
graph.print("\nFactor Graph:\n"); // print
graph.print("\nFactor Graph:\n"); // print
// 3. Create the data structure to hold the initial estimate of the solution
// For illustrative purposes, these have been deliberately set to incorrect values
Values initialEstimate;
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2 ));
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2 ));
initialEstimate.insert(3, Pose2(4.1, 0.1, M_PI_2));
initialEstimate.insert(4, Pose2(4.0, 2.0, M_PI ));
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2));
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2));
initialEstimate.insert(3, Pose2(4.1, 0.1, M_PI_2));
initialEstimate.insert(4, Pose2(4.0, 2.0, M_PI));
initialEstimate.insert(5, Pose2(2.1, 2.1, -M_PI_2));
initialEstimate.print("\nInitial Estimate:\n"); // print
initialEstimate.print("\nInitial Estimate:\n"); // print
// 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
// The optimizer accepts an optional set of configuration parameters,


@ -21,7 +21,6 @@
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
// For an explanation of headers below, please see Pose2SLAMExample.cpp
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
@ -31,7 +30,6 @@ using namespace std;
using namespace gtsam;
int main(int argc, char** argv) {
// 1. Create a factor graph container and add factors to it
ExpressionFactorGraph graph;
@ -39,31 +37,32 @@ int main(int argc, char** argv) {
Pose2_ x1(1), x2(2), x3(3), x4(4), x5(5);
// 2a. Add a prior on the first pose, setting it to the origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addExpressionFactor(x1, Pose2(0, 0, 0), priorNoise);
// For simplicity, we will use the same noise model for odometry and loop closures
noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// For simplicity, we use the same noise model for odometry and loop closures
auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// 2b. Add odometry factors
graph.addExpressionFactor(between(x1,x2), Pose2(2, 0, 0 ), model);
graph.addExpressionFactor(between(x2,x3), Pose2(2, 0, M_PI_2), model);
graph.addExpressionFactor(between(x3,x4), Pose2(2, 0, M_PI_2), model);
graph.addExpressionFactor(between(x4,x5), Pose2(2, 0, M_PI_2), model);
graph.addExpressionFactor(between(x1, x2), Pose2(2, 0, 0), model);
graph.addExpressionFactor(between(x2, x3), Pose2(2, 0, M_PI_2), model);
graph.addExpressionFactor(between(x3, x4), Pose2(2, 0, M_PI_2), model);
graph.addExpressionFactor(between(x4, x5), Pose2(2, 0, M_PI_2), model);
// 2c. Add the loop closure constraint
graph.addExpressionFactor(between(x5,x2), Pose2(2, 0, M_PI_2), model);
graph.print("\nFactor Graph:\n"); // print
graph.addExpressionFactor(between(x5, x2), Pose2(2, 0, M_PI_2), model);
graph.print("\nFactor Graph:\n"); // print
// 3. Create the data structure to hold the initial estimate of the solution
// For illustrative purposes, these have been deliberately set to incorrect values
// 3. Create the data structure to hold the initial estimate of the
// solution. For illustrative purposes, these have been deliberately set to
// incorrect values
Values initialEstimate;
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2 ));
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2 ));
initialEstimate.insert(3, Pose2(4.1, 0.1, M_PI_2));
initialEstimate.insert(4, Pose2(4.0, 2.0, M_PI ));
initialEstimate.insert(1, Pose2(0.5, 0.0, 0.2));
initialEstimate.insert(2, Pose2(2.3, 0.1, -0.2));
initialEstimate.insert(3, Pose2(4.1, 0.1, M_PI_2));
initialEstimate.insert(4, Pose2(4.0, 2.0, M_PI));
initialEstimate.insert(5, Pose2(2.1, 2.1, -M_PI_2));
initialEstimate.print("\nInitial Estimate:\n"); // print
initialEstimate.print("\nInitial Estimate:\n"); // print
// 4. Optimize the initial values using a Gauss-Newton nonlinear optimizer
GaussNewtonParams parameters;

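For readers new to the expression API used above: Pose2_ wraps a key as an unknown Pose2, between() builds a derived expression, and addExpressionFactor ties an expression to a measurement and a noise model. A stripped-down sketch, assuming the usual <gtsam/slam/expressions.h> header supplies Pose2_ and between, as in the full example source:

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
#include <gtsam/slam/expressions.h>

using namespace gtsam;

int main() {
  ExpressionFactorGraph graph;
  Pose2_ x1(1), x2(2);  // unknown poses with keys 1 and 2
  auto noise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
  // Equivalent to a BetweenFactor, written as an expression on x1 and x2.
  graph.addExpressionFactor(between(x1, x2), Pose2(2, 0, 0), noise);
  graph.print("\nExpression graph:\n");
  return 0;
}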

@ -19,7 +19,6 @@
*/
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <fstream>
@ -29,60 +28,60 @@ using namespace gtsam;
// HOWTO: ./Pose2SLAMExample_g2o inputFile outputFile (maxIterations) (tukey/huber)
int main(const int argc, const char *argv[]) {
string kernelType = "none";
int maxIterations = 100; // default
string g2oFile = findExampleDataFile("noisyToyGraph.txt"); // default
int maxIterations = 100; // default
string g2oFile = findExampleDataFile("noisyToyGraph.txt"); // default
// Parse user's inputs
if (argc > 1){
g2oFile = argv[1]; // input dataset filename
// outputFile = g2oFile = argv[2]; // done later
if (argc > 1) {
g2oFile = argv[1]; // input dataset filename
}
if (argc > 3){
maxIterations = atoi(argv[3]); // user can specify the maximum number of iterations
if (argc > 3) {
maxIterations = atoi(argv[3]); // user can specify the maximum number of iterations
}
if (argc > 4){
kernelType = argv[4]; // user can specify either tukey or huber
if (argc > 4) {
kernelType = argv[4]; // user can specify either tukey or huber
}
// reading file and creating factor graph
NonlinearFactorGraph::shared_ptr graph;
Values::shared_ptr initial;
bool is3D = false;
if(kernelType.compare("none") == 0){
boost::tie(graph, initial) = readG2o(g2oFile,is3D);
if (kernelType.compare("none") == 0) {
boost::tie(graph, initial) = readG2o(g2oFile, is3D);
}
if(kernelType.compare("huber") == 0){
if (kernelType.compare("huber") == 0) {
std::cout << "Using robust kernel: huber " << std::endl;
boost::tie(graph, initial) = readG2o(g2oFile,is3D, KernelFunctionTypeHUBER);
boost::tie(graph, initial) =
readG2o(g2oFile, is3D, KernelFunctionTypeHUBER);
}
if(kernelType.compare("tukey") == 0){
if (kernelType.compare("tukey") == 0) {
std::cout << "Using robust kernel: tukey " << std::endl;
boost::tie(graph, initial) = readG2o(g2oFile,is3D, KernelFunctionTypeTUKEY);
boost::tie(graph, initial) =
readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY);
}
// Add prior on the pose having index (key) = 0
NonlinearFactorGraph graphWithPrior = *graph;
noiseModel::Diagonal::shared_ptr priorModel = //
auto priorModel = //
noiseModel::Diagonal::Variances(Vector3(1e-6, 1e-6, 1e-8));
graphWithPrior.add(PriorFactor<Pose2>(0, Pose2(), priorModel));
graph->addPrior(0, Pose2(), priorModel);
std::cout << "Adding prior on pose 0 " << std::endl;
GaussNewtonParams params;
params.setVerbosity("TERMINATION");
if (argc > 3) {
params.maxIterations = maxIterations;
std::cout << "User requested a maximum of " << params.maxIterations << " iterations "<< std::endl;
std::cout << "User requested a maximum of " << params.maxIterations
<< " iterations " << std::endl;
}
std::cout << "Optimizing the factor graph" << std::endl;
GaussNewtonOptimizer optimizer(graphWithPrior, *initial, params);
GaussNewtonOptimizer optimizer(*graph, *initial, params);
Values result = optimizer.optimize();
std::cout << "Optimization complete" << std::endl;
std::cout << "initial error=" <<graph->error(*initial)<< std::endl;
std::cout << "final error=" <<graph->error(result)<< std::endl;
std::cout << "initial error=" << graph->error(*initial) << std::endl;
std::cout << "final error=" << graph->error(result) << std::endl;
if (argc < 3) {
result.print("result");


@ -17,7 +17,6 @@
*/
// For an explanation of headers below, please see Pose2SLAMExample.cpp
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
@ -43,7 +42,7 @@ int main (int argc, char** argv) {
// Add a Gaussian prior on the first pose
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.01, 0.01, 0.01));
graph->push_back(PriorFactor<Pose2>(0, priorMean, priorNoise));
graph->addPrior(0, priorMean, priorNoise);
// Single Step Optimization using Levenberg-Marquardt
Values result = LevenbergMarquardtOptimizer(*graph, *initial).optimize();


@ -17,7 +17,6 @@
*/
// For an explanation of headers below, please see Pose2SLAMExample.cpp
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
@ -26,20 +25,20 @@
using namespace std;
using namespace gtsam;
int main (int argc, char** argv) {
int main(int argc, char** argv) {
// 1. Create a factor graph container and add factors to it
NonlinearFactorGraph graph;
// 2a. Add a prior on the first pose, setting it to the origin
noiseModel::Diagonal::shared_ptr priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.emplace_shared<PriorFactor<Pose2> >(1, Pose2(0, 0, 0), priorNoise);
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, Pose2(0, 0, 0), priorNoise);
// For simplicity, we will use the same noise model for odometry and loop closures
noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// For simplicity, we will use the same noise model for odometry and loop
// closures
auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
// 2b. Add odometry factors
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(2, 0, 0 ), model);
graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(2, 0, 0), model);
graph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(2, 0, M_PI_2), model);
graph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(2, 0, M_PI_2), model);
graph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(2, 0, M_PI_2), model);
@ -50,10 +49,10 @@ int main (int argc, char** argv) {
// 3. Create the data structure to hold the initial estimate to the solution
// For illustrative purposes, these have been deliberately set to incorrect values
Values initial;
initial.insert(1, Pose2(0.5, 0.0, 0.2 ));
initial.insert(2, Pose2(2.3, 0.1, -0.2 ));
initial.insert(3, Pose2(4.1, 0.1, M_PI_2));
initial.insert(4, Pose2(4.0, 2.0, M_PI ));
initial.insert(1, Pose2(0.5, 0.0, 0.2));
initial.insert(2, Pose2(2.3, 0.1, -0.2));
initial.insert(3, Pose2(4.1, 0.1, M_PI_2));
initial.insert(4, Pose2(4.0, 2.0, M_PI));
initial.insert(5, Pose2(2.1, 2.1, -M_PI_2));
// Single Step Optimization using Levenberg-Marquardt

Some files were not shown because too many files have changed in this diff.