Merge branch 'develop' into fix-1496
commit ecd6450b5b
@ -9,14 +9,13 @@ set -x -e
|
|||
# install TBB with _debug.so files
|
||||
function install_tbb()
|
||||
{
|
||||
echo install_tbb
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
sudo apt-get -y install libtbb-dev
|
||||
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
brew install tbb
|
||||
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
if [ -z ${PYTHON_VERSION+x} ]; then
|
||||
|
|
@ -37,19 +36,18 @@ function install_dependencies()
|
|||
|
||||
export PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
|
||||
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
|
||||
$PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
|
||||
if [ "${GTSAM_WITH_TBB:-OFF}" == "ON" ]; then
|
||||
install_tbb
|
||||
fi
|
||||
}
|
||||
|
||||
function build()
|
||||
{
|
||||
mkdir $GITHUB_WORKSPACE/build
|
||||
cd $GITHUB_WORKSPACE/build
|
||||
|
||||
export CMAKE_GENERATOR=Ninja
|
||||
BUILD_PYBIND="ON"
|
||||
|
||||
cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
cmake $GITHUB_WORKSPACE \
|
||||
-B build \
|
||||
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DGTSAM_BUILD_TESTS=OFF \
|
||||
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
|
||||
-DGTSAM_USE_QUATERNIONS=OFF \
|
||||
|
|
@ -65,16 +63,16 @@ function build()
|
|||
|
||||
|
||||
# Set to 2 cores so that Actions does not error out during resource provisioning.
|
||||
make -j2 install
|
||||
cmake --build build -j2
|
||||
|
||||
cd $GITHUB_WORKSPACE/build/python
|
||||
$PYTHON -m pip install --user .
|
||||
$PYTHON -m pip install --user build/python
|
||||
}
|
||||
|
||||
function test()
|
||||
{
|
||||
cd $GITHUB_WORKSPACE/python/gtsam/tests
|
||||
$PYTHON -m unittest discover -v
|
||||
cd $GITHUB_WORKSPACE
|
||||
}
|
||||
|
||||
# select between build or test
|
||||
|
|
|
|||
|
|
@ -5,33 +5,30 @@
|
|||
# Specifically Linux and macOS.
|
||||
##########################################################
|
||||
|
||||
set -e # Make sure any error makes the script return an error code
|
||||
set -x # echo
|
||||
|
||||
# install TBB with _debug.so files
|
||||
function install_tbb()
|
||||
{
|
||||
echo install_tbb
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
sudo apt-get -y install libtbb-dev
|
||||
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
brew install tbb
|
||||
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# common tasks before either build or test
|
||||
function configure()
|
||||
{
|
||||
set -e # Make sure any error makes the script return an error code
|
||||
set -x # echo
|
||||
# delete old build
|
||||
rm -rf build
|
||||
|
||||
SOURCE_DIR=$GITHUB_WORKSPACE
|
||||
BUILD_DIR=$GITHUB_WORKSPACE/build
|
||||
|
||||
#env
|
||||
rm -fr $BUILD_DIR || true
|
||||
mkdir $BUILD_DIR && cd $BUILD_DIR
|
||||
|
||||
[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
|
||||
if [ "${GTSAM_WITH_TBB:-OFF}" == "ON" ]; then
|
||||
install_tbb
|
||||
fi
|
||||
|
||||
if [ ! -z "$GCC_VERSION" ]; then
|
||||
export CC=gcc-$GCC_VERSION
|
||||
|
|
@ -40,7 +37,9 @@ function configure()
|
|||
|
||||
# GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs
|
||||
# CMAKE_CXX_FLAGS="-w": Suppress warnings to avoid IO latency in CI logs
|
||||
cmake $SOURCE_DIR \
|
||||
export CMAKE_GENERATOR=Ninja
|
||||
cmake $GITHUB_WORKSPACE \
|
||||
-B build \
|
||||
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \
|
||||
-DCMAKE_CXX_FLAGS="-w" \
|
||||
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
|
||||
|
|
@ -62,9 +61,9 @@ function configure()
|
|||
function finish ()
|
||||
{
|
||||
# Print ccache stats
|
||||
[ -x "$(command -v ccache)" ] && ccache -s
|
||||
|
||||
cd $SOURCE_DIR
|
||||
if [ -x "$(command -v ccache)" ]; then
|
||||
ccache -s
|
||||
fi
|
||||
}
|
||||
|
||||
# compile the code with the intent of populating the cache
|
||||
|
|
@ -77,12 +76,12 @@ function build ()
|
|||
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
if (($(nproc) > 2)); then
|
||||
make -j4
|
||||
cmake --build build -j4
|
||||
else
|
||||
make -j2
|
||||
cmake --build build -j2
|
||||
fi
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
make -j$(sysctl -n hw.physicalcpu)
|
||||
cmake --build build -j$(sysctl -n hw.physicalcpu)
|
||||
fi
|
||||
|
||||
finish
|
||||
|
|
@ -99,12 +98,12 @@ function test ()
|
|||
# Actual testing
|
||||
if [ "$(uname)" == "Linux" ]; then
|
||||
if (($(nproc) > 2)); then
|
||||
make -j$(nproc) check
|
||||
cmake --build build -j$(nproc) --target check
|
||||
else
|
||||
make -j2 check
|
||||
cmake --build build -j2 --target check
|
||||
fi
|
||||
elif [ "$(uname)" == "Darwin" ]; then
|
||||
make -j$(sysctl -n hw.physicalcpu) check
|
||||
cmake --build build -j$(sysctl -n hw.physicalcpu) --target check
|
||||
fi
|
||||
|
||||
finish
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ jobs:
|
|||
fi
|
||||
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev ninja-build
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
|
|
|
|||
|
|
@ -75,7 +75,7 @@ jobs:
|
|||
fi
|
||||
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libboost-all-dev ninja-build
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
|
|
@ -109,10 +109,13 @@ jobs:
|
|||
with:
|
||||
swap-size-gb: 6
|
||||
|
||||
- name: Install Dependencies
|
||||
- name: Install System Dependencies
|
||||
run: |
|
||||
bash .github/scripts/python.sh -d
|
||||
|
||||
- name: Install Python Dependencies
|
||||
run: python$PYTHON_VERSION -m pip install -r python/dev_requirements.txt
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
bash .github/scripts/python.sh -b
|
||||
|
|
|
|||
|
|
@ -103,7 +103,7 @@ jobs:
|
|||
sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main"
|
||||
fi
|
||||
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev
|
||||
sudo apt-get -y install cmake build-essential pkg-config libpython3-dev python3-numpy libicu-dev ninja-build
|
||||
|
||||
if [ "${{ matrix.compiler }}" = "gcc" ]; then
|
||||
sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ jobs:
|
|||
Debug,
|
||||
Release
|
||||
]
|
||||
|
||||
build_unstable: [ON]
|
||||
include:
|
||||
- name: windows-2019-cl
|
||||
|
|
@ -93,10 +94,23 @@ jobs:
|
|||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup msbuild
|
||||
uses: ilammy/msvc-dev-cmd@v1
|
||||
with:
|
||||
arch: x${{ matrix.platform }}
|
||||
|
||||
- name: Configuration
|
||||
shell: bash
|
||||
run: |
|
||||
export CMAKE_GENERATOR=Ninja
|
||||
cmake -E remove_directory build
|
||||
cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT}\lib"
|
||||
cmake -B build \
|
||||
-S . \
|
||||
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
|
||||
-DGTSAM_ALLOW_DEPRECATED_SINCE_V43=OFF \
|
||||
-DBOOST_ROOT="${BOOST_ROOT}" \
|
||||
-DBOOST_INCLUDEDIR="${BOOST_ROOT}\boost\include" \
|
||||
-DBOOST_LIBRARYDIR="${BOOST_ROOT}\lib"
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
|
|
@ -106,9 +120,6 @@ jobs:
|
|||
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target gtsam_unstable
|
||||
|
||||
# Target doesn't exist
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target wrap
|
||||
|
||||
- name: Test
|
||||
shell: bash
|
||||
run: |
|
||||
|
|
@ -116,50 +127,33 @@ jobs:
|
|||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.basis
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry
|
||||
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.inference
|
||||
|
||||
# Compile. Fail with exception
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear
|
||||
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.navigation
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sam
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.sfm
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.symbolic
|
||||
|
||||
# Compile. Fail with exception
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.hybrid
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear
|
||||
|
||||
# Compilation error
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam
|
||||
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam
|
||||
|
||||
# Run GTSAM_UNSTABLE tests
|
||||
cmake --build build -j4 --config ${{ matrix.build_type }} --target check.base_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.geometry_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.linear_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.discrete_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.dynamics_unstable
|
||||
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.nonlinear_unstable
|
||||
|
||||
# Compilation error
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.slam_unstable
|
||||
|
||||
# Compilation error
|
||||
# Compile. Fail with exception
|
||||
# cmake --build build -j4 --config ${{ matrix.build_type }} --target check.partition
|
||||
|
||||
# Run all tests
|
||||
# cmake --build build -j1 --config ${{ matrix.build_type }} --target check
|
||||
|
|
|
|||
|
|
@ -1,11 +1,5 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
# new feature to Cmake Version > 2.8.12
|
||||
# Mac ONLY. Define Relative Path on Mac OS
|
||||
if(NOT DEFINED CMAKE_MACOSX_RPATH)
|
||||
set(CMAKE_MACOSX_RPATH 0)
|
||||
endif()
|
||||
|
||||
# Set the version number for the library
|
||||
set (GTSAM_VERSION_MAJOR 4)
|
||||
set (GTSAM_VERSION_MINOR 3)
|
||||
|
|
@ -13,18 +7,16 @@ set (GTSAM_VERSION_PATCH 0)
|
|||
set (GTSAM_PRERELEASE_VERSION "a0")
|
||||
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
|
||||
|
||||
if (${GTSAM_VERSION_PATCH} EQUAL 0)
|
||||
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
|
||||
if ("${GTSAM_PRERELEASE_VERSION}" STREQUAL "")
|
||||
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
|
||||
else()
|
||||
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
|
||||
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}${GTSAM_PRERELEASE_VERSION}")
|
||||
endif()
|
||||
|
||||
project(GTSAM
|
||||
LANGUAGES CXX C
|
||||
VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
|
||||
|
||||
message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")
|
||||
|
||||
set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
|
||||
set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
|
||||
set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
|
||||
|
|
@ -44,13 +36,6 @@ set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
|
|||
include(GtsamMakeConfigFile)
|
||||
include(GNUInstallDirs)
|
||||
|
||||
# Load build type flags and default to Debug mode
|
||||
include(GtsamBuildTypes)
|
||||
|
||||
# Use macros for creating tests/timing scripts
|
||||
include(GtsamTesting)
|
||||
include(GtsamPrinting)
|
||||
|
||||
# guard against in-source builds
|
||||
if(${GTSAM_SOURCE_DIR} STREQUAL ${GTSAM_BINARY_DIR})
|
||||
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
|
||||
|
|
@ -58,6 +43,13 @@ endif()
|
|||
|
||||
include(cmake/HandleGeneralOptions.cmake) # CMake build options
|
||||
|
||||
# Load build type flags and default to Debug mode
|
||||
include(GtsamBuildTypes)
|
||||
|
||||
# Use macros for creating tests/timing scripts
|
||||
include(GtsamTesting)
|
||||
include(GtsamPrinting)
|
||||
|
||||
############### Decide on BOOST ######################################
|
||||
# Enable or disable serialization with GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
option(GTSAM_ENABLE_BOOST_SERIALIZATION "Enable Boost serialization" ON)
|
||||
|
|
|
|||
|
|
@ -182,7 +182,7 @@ Here are some tips to get the best possible performance out of GTSAM.
|
|||
optimization by 30-50%. Please note that this may not be true for very small
|
||||
problems where the overhead of dispatching work to multiple threads outweighs
|
||||
the benefit. We recommend that you benchmark your problem with/without TBB.
|
||||
3. Add `-march=native` to `GTSAM_CMAKE_CXX_FLAGS`. A performance gain of
|
||||
3. Use `GTSAM_BUILD_WITH_MARCH_NATIVE`. A performance gain of
|
||||
25-30% can be expected on modern processors. Note that this affects the portability
|
||||
of your executable. It may not run when copied to another system with older/different
|
||||
processor architecture.
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
**As of January 2023, the `develop` branch is officially in "Pre 4.3" mode. We envision several API-breaking changes as we switch to C++17 and away from boost.**
|
||||
|
||||
In addition, features deprecated in 4.2 will be removed. Please use the last [4.2a8 release](https://github.com/borglab/gtsam/releases/tag/4.2a8) if you need those features. However, most are easily converted and can be tracked down (in 4.2) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
|
||||
In addition, features deprecated in 4.2 will be removed. Please use the stable [4.2 release](https://github.com/borglab/gtsam/releases/tag/4.2) if you need those features. However, most are easily converted and can be tracked down (in 4.2) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V42`.
|
||||
|
||||
## What is GTSAM?
|
||||
|
||||
|
|
|
|||
|
|
@ -55,9 +55,6 @@ if(NOT CMAKE_BUILD_TYPE AND NOT MSVC AND NOT XCODE_VERSION)
|
|||
"Choose the type of build, options are: None Debug Release Timing Profiling RelWithDebInfo." FORCE)
|
||||
endif()
|
||||
|
||||
# Add option for using build type postfixes to allow installing multiple build modes
|
||||
option(GTSAM_BUILD_TYPE_POSTFIXES "Enable/Disable appending the build type to the name of compiled libraries" ON)
|
||||
|
||||
# Define all cache variables, to be populated below depending on the OS/compiler:
|
||||
set(GTSAM_COMPILE_OPTIONS_PRIVATE "" CACHE INTERNAL "(Do not edit) Private compiler flags for all build configurations." FORCE)
|
||||
set(GTSAM_COMPILE_OPTIONS_PUBLIC "" CACHE INTERNAL "(Do not edit) Public compiler flags (exported to user projects) for all build configurations." FORCE)
|
||||
|
|
@ -82,6 +79,13 @@ set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELWITHDEBINFO "NDEBUG" CACHE STRING "(Us
|
|||
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELEASE "NDEBUG" CACHE STRING "(User editable) Private preprocessor macros for Release configuration.")
|
||||
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_PROFILING "NDEBUG" CACHE STRING "(User editable) Private preprocessor macros for Profiling configuration.")
|
||||
set(GTSAM_COMPILE_DEFINITIONS_PRIVATE_TIMING "NDEBUG;ENABLE_TIMING" CACHE STRING "(User editable) Private preprocessor macros for Timing configuration.")
|
||||
|
||||
mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_DEBUG)
|
||||
mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELWITHDEBINFO)
|
||||
mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_RELEASE)
|
||||
mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_PROFILING)
|
||||
mark_as_advanced(GTSAM_COMPILE_DEFINITIONS_PRIVATE_TIMING)
|
||||
|
||||
if(MSVC)
|
||||
# Common to all configurations:
|
||||
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE
|
||||
|
|
@ -143,6 +147,13 @@ else()
|
|||
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING -g -O3 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
|
||||
endif()
|
||||
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON)
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG)
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO)
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE)
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_PROFILING)
|
||||
mark_as_advanced(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING)
|
||||
|
||||
# Enable C++17:
|
||||
if (NOT CMAKE_VERSION VERSION_LESS 3.8)
|
||||
set(GTSAM_COMPILE_FEATURES_PUBLIC "cxx_std_17" CACHE STRING "CMake compile features property for all gtsam targets.")
|
||||
|
|
@ -198,7 +209,6 @@ if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
|
|||
endif()
|
||||
|
||||
if (NOT MSVC)
|
||||
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
|
||||
if(GTSAM_BUILD_WITH_MARCH_NATIVE)
|
||||
# Check if Apple OS and compiler is [Apple]Clang
|
||||
if(APPLE AND (${CMAKE_CXX_COMPILER_ID} MATCHES "^(Apple)?Clang$"))
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ endmacro()
|
|||
# GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
|
||||
#
|
||||
# Usage example:
|
||||
# gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
|
||||
# gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
|
||||
#
|
||||
# Arguments:
|
||||
# globPatterns: The list of files or glob patterns from which to create examples, with
|
||||
|
|
@ -51,8 +51,9 @@ endmacro()
|
|||
# excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
|
||||
# an empty string "" if nothing needs to be excluded.
|
||||
# linkLibraries: The list of libraries to link to.
|
||||
macro(gtsamAddExamplesGlob globPatterns excludedFiles linkLibraries)
|
||||
gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "examples" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
|
||||
# buildWithAll: Build examples with `make` and/or `make all`
|
||||
macro(gtsamAddExamplesGlob globPatterns excludedFiles linkLibraries buildWithAll)
|
||||
gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "examples" ${buildWithAll})
|
||||
endmacro()
|
||||
|
||||
|
||||
|
|
@ -76,8 +77,9 @@ endmacro()
|
|||
# excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
|
||||
# an empty string "" if nothing needs to be excluded.
|
||||
# linkLibraries: The list of libraries to link to.
|
||||
macro(gtsamAddTimingGlob globPatterns excludedFiles linkLibraries)
|
||||
gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "timing" ${GTSAM_BUILD_TIMING_ALWAYS})
|
||||
# buildWithAll: Build timing scripts with `make` and/or `make all`
|
||||
macro(gtsamAddTimingGlob globPatterns excludedFiles linkLibraries buildWithAll)
|
||||
gtsamAddExesGlob_impl("${globPatterns}" "${excludedFiles}" "${linkLibraries}" "timing" ${buildWithAll})
|
||||
endmacro()
|
||||
|
||||
|
||||
|
|
@ -86,9 +88,8 @@ endmacro()
|
|||
# Build macros for using tests
|
||||
enable_testing()
|
||||
|
||||
#TODO(Varun) Move to HandlePrintConfiguration.cmake. This will require additional changes.
|
||||
option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)
|
||||
option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON)
|
||||
option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not)" OFF)
|
||||
|
||||
# Add option for combining unit tests
|
||||
if(MSVC OR XCODE_VERSION)
|
||||
|
|
@ -123,6 +124,7 @@ add_custom_target(timing)
|
|||
# Implementations of this file's macros:
|
||||
|
||||
macro(gtsamAddTestsGlob_impl groupName globPatterns excludedFiles linkLibraries)
|
||||
#TODO(Varun) Building of tests should not depend on global gtsam flag
|
||||
if(GTSAM_BUILD_TESTS)
|
||||
# Add group target if it doesn't already exist
|
||||
if(NOT TARGET check.${groupName})
|
||||
|
|
|
|||
|
|
@ -8,6 +8,18 @@ else()
|
|||
set(GTSAM_UNSTABLE_AVAILABLE 0)
|
||||
endif()
|
||||
|
||||
### GtsamTesting related options
|
||||
option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON)
|
||||
option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not)" OFF)
|
||||
###
|
||||
|
||||
# Add option for using build type postfixes to allow installing multiple build modes
|
||||
option(GTSAM_BUILD_TYPE_POSTFIXES "Enable/Disable appending the build type to the name of compiled libraries" ON)
|
||||
|
||||
if (NOT MSVC)
|
||||
option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" OFF)
|
||||
endif()
|
||||
|
||||
# Configurable Options
|
||||
if(GTSAM_UNSTABLE_AVAILABLE)
|
||||
option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
|
||||
|
|
|
|||
|
|
@ -47,9 +47,9 @@ linkLibraries: The list of libraries to link to in addition to CppUnitLite.
|
|||
</code></pre>
|
||||
</li>
|
||||
<li>
|
||||
<p><code>gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries)</code> Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are built with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.</p>
|
||||
<p><code>gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries buildWithAll)</code> Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are built with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.</p>
|
||||
<p>Usage example:</p>
|
||||
<pre><code>gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
|
||||
<pre><code>gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
|
||||
</code></pre>
|
||||
<p>Arguments:</p>
|
||||
<pre><code>globPatterns: The list of files or glob patterns from which to create unit tests, with
|
||||
|
|
@ -58,6 +58,7 @@ linkLibraries: The list of libraries to link to in addition to CppUnitLite.
|
|||
excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
|
||||
an empty string "" if nothing needs to be excluded.
|
||||
linkLibraries: The list of libraries to link to.
|
||||
buildWithAll: Build examples with `make` and/or `make all`
|
||||
</code></pre>
|
||||
</li>
|
||||
</ul>
|
||||
|
|
|
|||
|
|
@ -52,11 +52,11 @@ Defines two useful functions for creating CTest unit tests. Also immediately cr
|
|||
Pass an empty string "" if nothing needs to be excluded.
|
||||
linkLibraries: The list of libraries to link to in addition to CppUnitLite.
|
||||
|
||||
* `gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries)` Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are built with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
|
||||
* `gtsamAddExamplesGlob(globPatterns excludedFiles linkLibraries buildWithAll)` Add scripts that will serve as examples of how to use the library. A list of files or glob patterns is specified, and one executable will be created for each matching .cpp file. These executables will not be installed. They are built with 'make all' if GTSAM_BUILD_EXAMPLES_ALWAYS is enabled. They may also be built with 'make examples'.
|
||||
|
||||
Usage example:
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib")
|
||||
gtsamAddExamplesGlob("*.cpp" "BrokenExample.cpp" "gtsam;GeographicLib" ON)
|
||||
|
||||
Arguments:
|
||||
|
||||
|
|
@ -66,6 +66,7 @@ Defines two useful functions for creating CTest unit tests. Also immediately cr
|
|||
excludedFiles: A list of files or globs to exclude, e.g. "C*.cpp;BrokenExample.cpp". Pass
|
||||
an empty string "" if nothing needs to be excluded.
|
||||
linkLibraries: The list of libraries to link to.
|
||||
buildWithAll: Build examples with `make` and/or `make all`
|
||||
|
||||
## GtsamMakeConfigFile
|
||||
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ if (GTSAM_BUILD_DOCS)
|
|||
if (GTSAM_BUILD_UNSTABLE)
|
||||
list(APPEND doc_subdirs ${gtsam_unstable_doc_subdirs})
|
||||
endif()
|
||||
if (GTSAM_BUILD_EXAMPLES)
|
||||
if (GTSAM_BUILD_EXAMPLES_ALWAYS)
|
||||
list(APPEND doc_subdirs examples)
|
||||
endif()
|
||||
|
||||
|
|
|
|||
131
doc/Hybrid.lyx
|
|
@ -86,7 +86,7 @@ Hybrid Inference
|
|||
\end_layout
|
||||
|
||||
\begin_layout Author
|
||||
Frank Dellaert & Varun Agrawal
|
||||
Frank Dellaert
|
||||
\end_layout
|
||||
|
||||
\begin_layout Date
|
||||
|
|
@ -264,12 +264,110 @@ If we take the log of
|
|||
we get
|
||||
\begin_inset Formula
|
||||
\begin{equation}
|
||||
\log P(x|y,m)=\log P_{m}(x|y)=K_{gcm}-E_{gcm}(x,y).\label{eq:gm_log}
|
||||
\log P(x|y,m)=\log P_{m}(x|y)=K_{gc}(m)-E_{gcm}(x,y).\label{eq:gm_log}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
Equating
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\noindent
|
||||
For conciseness, we will write
|
||||
\begin_inset Formula $K_{gc}(m)$
|
||||
\end_inset
|
||||
|
||||
as
|
||||
\begin_inset Formula $K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\SpecialChar allowbreak
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\noindent
|
||||
The key point here is that
|
||||
\family roman
|
||||
\series medium
|
||||
\shape up
|
||||
\size normal
|
||||
\emph off
|
||||
\bar no
|
||||
\strikeout off
|
||||
\xout off
|
||||
\uuline off
|
||||
\uwave off
|
||||
\noun off
|
||||
\color none
|
||||
|
||||
\begin_inset Formula $K_{gm}$
|
||||
\end_inset
|
||||
|
||||
|
||||
\family default
|
||||
\series default
|
||||
\shape default
|
||||
\size default
|
||||
\emph default
|
||||
\bar default
|
||||
\strikeout default
|
||||
\xout default
|
||||
\uuline default
|
||||
\uwave default
|
||||
\noun default
|
||||
\color inherit
|
||||
is the log-normalization constant for the complete
|
||||
\emph on
|
||||
GaussianMixture
|
||||
\emph default
|
||||
across all values of
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
, and cannot be dependent on the value of
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
In contrast,
|
||||
\begin_inset Formula $K_{gcm}$
|
||||
\end_inset
|
||||
|
||||
is the log-normalization constant for a specific
|
||||
\emph on
|
||||
GaussianConditional
|
||||
\emph default
|
||||
mode (thus dependent on
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
) and can have differing values based on the covariance matrices for each
|
||||
mode.
|
||||
Thus to obtain a constant
|
||||
\begin_inset Formula $K_{gm}$
|
||||
\end_inset
|
||||
|
||||
which satisfies the invariant, we need to specify
|
||||
\begin_inset Formula $E_{gm}(x,y,m)$
|
||||
\end_inset
|
||||
|
||||
accordingly.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\SpecialChar allowbreak
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\noindent
|
||||
By equating
|
||||
\begin_inset CommandInset ref
|
||||
LatexCommand eqref
|
||||
reference "eq:gm_invariant"
|
||||
|
|
@ -289,7 +387,7 @@ noprefix "false"
|
|||
|
||||
\end_inset
|
||||
|
||||
we see that this can be achieved by defining the error
|
||||
, we see that this can be achieved by defining the error
|
||||
\begin_inset Formula $E_{gm}(x,y,m)$
|
||||
\end_inset
|
||||
|
||||
|
|
@ -541,9 +639,9 @@ noprefix "false"
|
|||
|
||||
:
|
||||
\begin_inset Formula
|
||||
\[
|
||||
E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}.
|
||||
\]
|
||||
\begin{equation}
|
||||
E_{mf}(y,m)=C+E_{gcm}(\bar{x},y)-K_{gcm}\label{eq:mixture_factor}
|
||||
\end{equation}
|
||||
|
||||
\end_inset
|
||||
|
||||
|
|
@ -555,7 +653,24 @@ Unfortunately, we can no longer choose
|
|||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
to make the constant disappear.
|
||||
to make the constant disappear, since
|
||||
\begin_inset Formula $C$
|
||||
\end_inset
|
||||
|
||||
has to be a constant applicable across all
|
||||
\begin_inset Formula $m$
|
||||
\end_inset
|
||||
|
||||
.
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\SpecialChar allowbreak
|
||||
|
||||
\end_layout
|
||||
|
||||
\begin_layout Standard
|
||||
\noindent
|
||||
There are two possibilities:
|
||||
\end_layout
|
||||
|
||||
|
|
|
|||
BIN
doc/Hybrid.pdf
Binary file not shown.
|
|
@ -6,9 +6,6 @@ FROM borglab/ubuntu-gtsam:bionic
|
|||
# Install pip
|
||||
RUN apt-get install -y python3-pip python3-dev
|
||||
|
||||
# Install python wrapper requirements
|
||||
RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt
|
||||
|
||||
# Run cmake again, now with python toolbox on
|
||||
WORKDIR /usr/src/gtsam/build
|
||||
RUN cmake \
|
||||
|
|
|
|||
|
|
@ -20,4 +20,4 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
|
|||
)
|
||||
endif()
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}")
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam;${Boost_PROGRAM_OPTIONS_LIBRARY}" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
|
||||
|
|
|
|||
|
|
@ -0,0 +1,75 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file GNCExample.cpp
|
||||
* @brief Simple example showcasing a Graduated Non-Convexity based solver
|
||||
* @author Achintya Mohan
|
||||
*/
|
||||
|
||||
/**
|
||||
* A simple 2D pose graph optimization example
|
||||
* - The robot is initially at origin (0.0, 0.0, 0.0)
|
||||
* - We have full odometry measurements for 2 motions
|
||||
* - The robot first moves to (1.0, 0.0, 0.1) and then to (1.0, 1.0, 0.2)
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/nonlinear/GncOptimizer.h>
|
||||
#include <gtsam/nonlinear/GncParams.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
int main() {
|
||||
cout << "Graduated Non-Convexity Example\n";
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
|
||||
// Add a prior to the first point, set to the origin
|
||||
auto priorNoise = noiseModel::Isotropic::Sigma(3, 0.1);
|
||||
graph.addPrior(1, Pose2(0.0, 0.0, 0.0), priorNoise);
|
||||
|
||||
// Add additional factors, noise models must be Gaussian
|
||||
Pose2 x1(1.0, 0.0, 0.1);
|
||||
graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, x1, noiseModel::Isotropic::Sigma(3, 0.2));
|
||||
Pose2 x2(0.0, 1.0, 0.1);
|
||||
graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, x2, noiseModel::Isotropic::Sigma(3, 0.4));
|
||||
|
||||
// Initial estimates
|
||||
Values initial;
|
||||
initial.insert(1, Pose2(0.2, 0.5, -0.1));
|
||||
initial.insert(2, Pose2(0.8, 0.3, 0.1));
|
||||
initial.insert(3, Pose2(0.8, 0.2, 0.3));
|
||||
|
||||
// Set options for the non-minimal solver
|
||||
LevenbergMarquardtParams lmParams;
|
||||
lmParams.setMaxIterations(1000);
|
||||
lmParams.setRelativeErrorTol(1e-5);
|
||||
|
||||
// Set GNC-specific options
|
||||
GncParams<LevenbergMarquardtParams> gncParams(lmParams);
|
||||
gncParams.setLossType(GncLossType::TLS);
|
||||
|
||||
// Optimize the graph and print results
|
||||
GncOptimizer<GncParams<LevenbergMarquardtParams>> optimizer(graph, initial, gncParams);
|
||||
Values result = optimizer.optimize();
|
||||
result.print("Final Result:");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -112,7 +112,7 @@ if(GTSAM_SUPPORT_NESTED_DISSECTION)
|
|||
endif()
|
||||
|
||||
# Versions
|
||||
set(gtsam_version ${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH})
|
||||
set(gtsam_version ${GTSAM_VERSION_STRING})
|
||||
set(gtsam_soversion ${GTSAM_VERSION_MAJOR})
|
||||
message(STATUS "GTSAM Version: ${gtsam_version}")
|
||||
message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
|
||||
|
|
@ -145,10 +145,6 @@ if (GTSAM_USE_EIGEN_MKL)
|
|||
target_include_directories(gtsam PUBLIC ${MKL_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if(GTSAM_USE_TBB)
|
||||
target_include_directories(gtsam PUBLIC ${TBB_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
# Add includes for source directories 'BEFORE' boost and any system include
|
||||
# paths so that the compiler uses GTSAM headers in our source directory instead
|
||||
# of any previously installed GTSAM headers.
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
* @date Feb 7, 2012
|
||||
*/
|
||||
|
||||
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
#pragma once
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
|
@ -270,3 +271,4 @@ void deserializeBinary(const std::string& serialized, T& output,
|
|||
///@}
|
||||
|
||||
} // namespace gtsam
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ namespace gtsam {
|
|||
* @ingroup discrete
|
||||
*/
|
||||
template <typename L>
|
||||
class GTSAM_EXPORT AlgebraicDecisionTree : public DecisionTree<L, double> {
|
||||
class AlgebraicDecisionTree : public DecisionTree<L, double> {
|
||||
/**
|
||||
* @brief Default method used by `labelFormatter` or `valueFormatter` when
|
||||
* printing.
|
||||
|
|
|
|||
|
|
@ -164,15 +164,23 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
// create new factor, note we collect keys that are not in frontalKeys
|
||||
// TODO(frank): why do we need this??? result should contain correct keys!!!
|
||||
/*
|
||||
Due to branch merging, the labels in `result` may be missing some keys
|
||||
E.g., after branch merging, we may get an ADT like:
|
||||
Leaf [2] 1.0204082
|
||||
|
||||
This is missing the key values used for branching.
|
||||
*/
|
||||
KeyVector difference, frontalKeys_(frontalKeys), keys_(keys());
|
||||
// Get the difference of the frontalKeys and the factor keys using set_difference
|
||||
std::sort(keys_.begin(), keys_.end());
|
||||
std::sort(frontalKeys_.begin(), frontalKeys_.end());
|
||||
std::set_difference(keys_.begin(), keys_.end(), frontalKeys_.begin(),
|
||||
frontalKeys_.end(), back_inserter(difference));
|
||||
|
||||
DiscreteKeys dkeys;
|
||||
for (i = 0; i < keys().size(); i++) {
|
||||
Key j = keys()[i];
|
||||
// TODO(frank): inefficient!
|
||||
if (std::find(frontalKeys.begin(), frontalKeys.end(), j) !=
|
||||
frontalKeys.end())
|
||||
continue;
|
||||
dkeys.push_back(DiscreteKey(j, cardinality(j)));
|
||||
for (Key key : difference) {
|
||||
dkeys.push_back(DiscreteKey(key, cardinality(key)));
|
||||
}
|
||||
return std::make_shared<DecisionTreeFactor>(dkeys, result);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -110,4 +110,12 @@ class GTSAM_EXPORT DiscreteBayesTree
|
|||
/// @}
|
||||
};
|
||||
|
||||
/// traits
|
||||
template <>
|
||||
struct traits<DiscreteBayesTreeClique>
|
||||
: public Testable<DiscreteBayesTreeClique> {};
|
||||
|
||||
template <>
|
||||
struct traits<DiscreteBayesTree> : public Testable<DiscreteBayesTree> {};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -77,6 +77,18 @@ class GTSAM_EXPORT DiscreteConditional
|
|||
const Signature::Table& table)
|
||||
: DiscreteConditional(Signature(key, parents, table)) {}
|
||||
|
||||
/**
|
||||
* Construct from key, parents, and a vector<double> specifying the
|
||||
* conditional probability table (CPT) in 00 01 10 11 order. For
|
||||
* three-valued, it would be 00 01 02 10 11 12 20 21 22, etc....
|
||||
*
|
||||
* Example: DiscreteConditional P(D, {B,E}, table);
|
||||
*/
|
||||
DiscreteConditional(const DiscreteKey& key, const DiscreteKeys& parents,
|
||||
const std::vector<double>& table)
|
||||
: DiscreteConditional(1, DiscreteKeys{key} & parents,
|
||||
ADT(DiscreteKeys{key} & parents, table)) {}
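A minimal usage sketch of this new constructor (the keys and the uniform table below are made up for illustration; with all entries equal, the documented 00 01 10 11 entry ordering does not affect the result):

    // Hypothetical binary key D with binary parents B and E.
    DiscreteKey D(0, 2), B(1, 2), E(2, 2);
    // Eight CPT entries, all 0.5, i.e. a uniform conditional P(D | B, E).
    std::vector<double> cpt(8, 0.5);
    DiscreteConditional pD(D, {B, E}, cpt);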
|
||||
|
||||
/**
|
||||
* Construct from key, parents, and a string specifying the conditional
|
||||
* probability table (CPT) in 00 01 10 11 order. For three-valued, it would
|
||||
|
|
|
|||
|
|
@ -49,6 +49,7 @@ namespace gtsam {
|
|||
|
||||
void DiscreteKeys::print(const std::string& s,
|
||||
const KeyFormatter& keyFormatter) const {
|
||||
std::cout << (s.empty() ? "" : s + " ") << std::endl;
|
||||
for (auto&& dkey : *this) {
|
||||
std::cout << DefaultKeyFormatter(dkey.first) << " " << dkey.second
|
||||
<< std::endl;
|
||||
|
|
|
|||
|
|
@ -74,6 +74,12 @@ namespace gtsam {
|
|||
return *this;
|
||||
}
|
||||
|
||||
/// Add multiple keys (non-const!)
|
||||
DiscreteKeys& operator&(const DiscreteKeys& keys) {
|
||||
this->insert(this->end(), keys.begin(), keys.end());
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// Print the keys and cardinalities.
|
||||
void print(const std::string& s = "",
|
||||
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
|
||||
|
|
|
|||
|
|
@ -280,11 +280,11 @@ class DiscreteLookupDAG {
|
|||
};
|
||||
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
std::pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
|
||||
pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
|
||||
EliminateDiscrete(const gtsam::DiscreteFactorGraph& factors,
|
||||
const gtsam::Ordering& frontalKeys);
|
||||
|
||||
std::pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
|
||||
pair<gtsam::DiscreteConditional*, gtsam::DecisionTreeFactor*>
|
||||
EliminateForMPE(const gtsam::DiscreteFactorGraph& factors,
|
||||
const gtsam::Ordering& frontalKeys);
|
||||
|
||||
|
|
|
|||
|
|
@ -48,6 +48,11 @@ TEST(DiscreteConditional, constructors) {
|
|||
DiscreteConditional actual2(1, f2);
|
||||
DecisionTreeFactor expected2 = f2 / *f2.sum(1);
|
||||
EXPECT(assert_equal(expected2, static_cast<DecisionTreeFactor>(actual2)));
|
||||
|
||||
std::vector<double> probs{0.2, 0.5, 0.3, 0.6, 0.4, 0.7, 0.25, 0.55, 0.35, 0.65, 0.45, 0.75};
|
||||
DiscreteConditional actual3(X, {Y, Z}, probs);
|
||||
DecisionTreeFactor expected3 = f2;
|
||||
EXPECT(assert_equal(expected3, static_cast<DecisionTreeFactor>(actual3)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -121,7 +121,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) {
|
|||
|
||||
Clique expected0(std::make_shared<DiscreteConditional>((key[0] | key[2], key[4]) = "2/1 2/1 2/1 2/1"));
|
||||
Clique::shared_ptr actual0 = (*bayesTree)[0];
|
||||
// EXPECT(assert_equal(expected0, *actual0)); // TODO, correct but fails
|
||||
EXPECT(assert_equal(expected0, *actual0));
|
||||
|
||||
Clique expected1(std::make_shared<DiscreteConditional>((key[1] | key[3], key[4]) = "1/2 1/2 1/2 1/2"));
|
||||
Clique::shared_ptr actual1 = (*bayesTree)[1];
|
||||
|
|
|
|||
|
|
@ -131,12 +131,15 @@ class CameraSet : public std::vector<CAMERA, Eigen::aligned_allocator<CAMERA>> {
|
|||
return z;
|
||||
}
|
||||
|
||||
/** An overload o the project2 function to accept
|
||||
/** An overload of the project2 function to accept
|
||||
* full matrices and vectors and pass them to the pointer
|
||||
* version of the function
|
||||
* version of the function.
|
||||
*
|
||||
* Use SFINAE to resolve overload ambiguity.
|
||||
*/
|
||||
template <class POINT, class... OptArgs>
|
||||
ZVector project2(const POINT& point, OptArgs&... args) const {
|
||||
typename std::enable_if<(sizeof...(OptArgs) != 0), ZVector>::type project2(
|
||||
const POINT& point, OptArgs&... args) const {
|
||||
// pass it to the pointer version of the function
|
||||
return project2(point, (&args)...);
|
||||
}
|
||||
|
|
@ -435,8 +438,7 @@ class CameraSet : public std::vector<CAMERA, Eigen::aligned_allocator<CAMERA>> {
|
|||
|
||||
// (DxD) += (DxZDim) * ( (ZDimxD) - (ZDimx3) * (3xZDim) * (ZDimxD) )
|
||||
// add contribution of current factor
|
||||
// TODO(gareth): Eigen doesn't let us pass the expression. Call eval() for
|
||||
// now...
|
||||
// Eigen doesn't let us pass the expression so we call eval()
|
||||
augmentedHessian.updateDiagonalBlock(
|
||||
aug_i,
|
||||
((FiT *
|
||||
|
|
|
|||
|
|
@ -332,7 +332,8 @@ public:
|
|||
}
|
||||
|
||||
/// stream operator
|
||||
friend std::ostream& operator<<(std::ostream &os, const PinholePose& camera) {
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const PinholePose& camera) {
|
||||
os << "{R: " << camera.pose().rotation().rpy().transpose();
|
||||
os << ", t: " << camera.pose().translation().transpose();
|
||||
if (!camera.K_) os << ", K: none";
|
||||
|
|
|
|||
|
|
@ -74,9 +74,10 @@ class GTSAM_EXPORT Similarity2 : public LieGroup<Similarity2, 4> {
|
|||
bool operator==(const Similarity2& other) const;
|
||||
|
||||
/// Print with optional string
|
||||
void print(const std::string& s) const;
|
||||
void print(const std::string& s = "") const;
|
||||
|
||||
friend std::ostream& operator<<(std::ostream& os, const Similarity2& p);
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const Similarity2& p);
|
||||
|
||||
/// @}
|
||||
/// @name Group
|
||||
|
|
|
|||
|
|
@ -171,7 +171,7 @@ Similarity3 Similarity3::Align(const Point3Pairs &abPointPairs) {
|
|||
return internal::align(d_abPointPairs, aRb, centroids);
|
||||
}
|
||||
|
||||
Similarity3 Similarity3::Align(const vector<Pose3Pair> &abPosePairs) {
|
||||
Similarity3 Similarity3::Align(const Pose3Pairs &abPosePairs) {
|
||||
const size_t n = abPosePairs.size();
|
||||
if (n < 2)
|
||||
throw std::runtime_error("input should have at least 2 pairs of poses");
|
||||
|
|
|
|||
|
|
@ -75,9 +75,10 @@ class GTSAM_EXPORT Similarity3 : public LieGroup<Similarity3, 7> {
|
|||
bool operator==(const Similarity3& other) const;
|
||||
|
||||
/// Print with optional string
|
||||
void print(const std::string& s) const;
|
||||
void print(const std::string& s = "") const;
|
||||
|
||||
friend std::ostream& operator<<(std::ostream& os, const Similarity3& p);
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const Similarity3& p);
|
||||
|
||||
/// @}
|
||||
/// @name Group
|
||||
|
|
@ -132,7 +133,7 @@ class GTSAM_EXPORT Similarity3 : public LieGroup<Similarity3, 7> {
|
|||
* computed using the algorithm described here:
|
||||
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf
|
||||
*/
|
||||
static Similarity3 Align(const std::vector<Pose3Pair>& abPosePairs);
|
||||
static Similarity3 Align(const Pose3Pairs& abPosePairs);
|
||||
|
||||
/// @}
|
||||
/// @name Lie Group
|
||||
|
|
|
|||
|
|
@ -238,9 +238,9 @@ class GTSAM_EXPORT SphericalCamera {
|
|||
// end of class SphericalCamera
|
||||
|
||||
template <>
|
||||
struct traits<SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
struct traits<SphericalCamera> : public internal::Manifold<SphericalCamera> {};
|
||||
|
||||
template <>
|
||||
struct traits<const SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
struct traits<const SphericalCamera> : public internal::Manifold<SphericalCamera> {};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -105,7 +105,8 @@ public:
|
|||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
friend std::ostream& operator<<(std::ostream& os, const Unit3& pair);
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const Unit3& pair);
|
||||
|
||||
/// The print function
|
||||
void print(const std::string& s = std::string()) const;
|
||||
|
|
|
|||
|
|
@ -1082,6 +1082,7 @@ class Similarity2 {
|
|||
|
||||
// Standard Interface
|
||||
bool equals(const gtsam::Similarity2& sim, double tol) const;
|
||||
void print(const std::string& s = "") const;
|
||||
Matrix matrix() const;
|
||||
gtsam::Rot2& rotation();
|
||||
gtsam::Point2& translation();
|
||||
|
|
@ -1105,6 +1106,7 @@ class Similarity3 {
|
|||
|
||||
// Standard Interface
|
||||
bool equals(const gtsam::Similarity3& sim, double tol) const;
|
||||
void print(const std::string& s = "") const;
|
||||
Matrix matrix() const;
|
||||
gtsam::Rot3& rotation();
|
||||
gtsam::Point3& translation();
|
||||
|
|
@ -1180,11 +1182,13 @@ class TriangulationParameters {
|
|||
bool enableEPI;
|
||||
double landmarkDistanceThreshold;
|
||||
double dynamicOutlierRejectionThreshold;
|
||||
bool useLOST;
|
||||
gtsam::SharedNoiseModel noiseModel;
|
||||
TriangulationParameters(const double rankTolerance = 1.0,
|
||||
const bool enableEPI = false,
|
||||
double landmarkDistanceThreshold = -1,
|
||||
double dynamicOutlierRejectionThreshold = -1,
|
||||
const bool useLOST = false,
|
||||
const gtsam::SharedNoiseModel& noiseModel = nullptr);
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -24,6 +24,13 @@
|
|||
#include <cmath>
|
||||
#include <iostream>
|
||||
|
||||
#if defined(__i686__) || defined(__i386__)
|
||||
// See issue discussion: https://github.com/borglab/gtsam/issues/1605
|
||||
constexpr double TEST_THRESHOLD = 1e-5;
|
||||
#else
|
||||
constexpr double TEST_THRESHOLD = 1e-7;
|
||||
#endif
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
|
@ -104,8 +111,8 @@ TEST(SphericalCamera, Dproject) {
|
|||
Matrix numerical_pose = numericalDerivative21(project3, pose, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose, point1);
|
||||
EXPECT(assert_equal(bearing1, result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -123,8 +130,8 @@ TEST(SphericalCamera, reprojectionError) {
|
|||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing1);
|
||||
EXPECT(assert_equal(Vector2(0.0, 0.0), result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -137,9 +144,9 @@ TEST(SphericalCamera, reprojectionError_noisy) {
|
|||
numericalDerivative31(reprojectionError2, pose, point1, bearing_noisy);
|
||||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing_noisy);
|
||||
EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e-5));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e2*TEST_THRESHOLD));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -151,8 +158,8 @@ TEST(SphericalCamera, Dproject2) {
|
|||
camera.project2(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project3, pose1, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose1, point1);
|
||||
CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
CHECK(assert_equal(numerical_pose, Dpose, TEST_THRESHOLD));
|
||||
CHECK(assert_equal(numerical_point, Dpoint, TEST_THRESHOLD));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -574,6 +574,11 @@ struct GTSAM_EXPORT TriangulationParameters {
|
|||
*/
|
||||
double dynamicOutlierRejectionThreshold;
|
||||
|
||||
/**
|
||||
* if true, will use the LOST algorithm instead of DLT
|
||||
*/
|
||||
bool useLOST;
|
||||
|
||||
SharedNoiseModel noiseModel; ///< used in the nonlinear triangulation
|
||||
|
||||
/**
|
||||
|
|
@ -588,10 +593,12 @@ struct GTSAM_EXPORT TriangulationParameters {
|
|||
TriangulationParameters(const double _rankTolerance = 1.0,
|
||||
const bool _enableEPI = false, double _landmarkDistanceThreshold = -1,
|
||||
double _dynamicOutlierRejectionThreshold = -1,
|
||||
const bool _useLOST = false,
|
||||
const SharedNoiseModel& _noiseModel = nullptr) :
|
||||
rankTolerance(_rankTolerance), enableEPI(_enableEPI), //
|
||||
landmarkDistanceThreshold(_landmarkDistanceThreshold), //
|
||||
dynamicOutlierRejectionThreshold(_dynamicOutlierRejectionThreshold),
|
||||
useLOST(_useLOST),
|
||||
noiseModel(_noiseModel){
|
||||
}
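A hedged sketch of how the new useLOST flag flows into triangulation (the camera set and measurement vector are assumed to exist in the caller):

    // Positional arguments follow the updated constructor above.
    TriangulationParameters params(1.0, /*enableEPI=*/false,
                                   /*landmarkDistanceThreshold=*/-1,
                                   /*dynamicOutlierRejectionThreshold=*/-1,
                                   /*useLOST=*/true);
    TriangulationResult result = triangulateSafe(cameras, measurements, params);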
|
||||
|
||||
|
|
@ -604,6 +611,7 @@ struct GTSAM_EXPORT TriangulationParameters {
|
|||
<< std::endl;
|
||||
os << "dynamicOutlierRejectionThreshold = "
|
||||
<< p.dynamicOutlierRejectionThreshold << std::endl;
|
||||
os << "useLOST = " << p.useLOST << std::endl;
|
||||
os << "noise model" << std::endl;
|
||||
return os;
|
||||
}
|
||||
|
|
@ -665,8 +673,8 @@ class TriangulationResult : public std::optional<Point3> {
|
|||
return value();
|
||||
}
|
||||
// stream to output
|
||||
friend std::ostream& operator<<(std::ostream& os,
|
||||
const TriangulationResult& result) {
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(
|
||||
std::ostream& os, const TriangulationResult& result) {
|
||||
if (result)
|
||||
os << "point = " << *result << std::endl;
|
||||
else
|
||||
|
|
@ -701,7 +709,7 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
|
|||
try {
|
||||
Point3 point =
|
||||
triangulatePoint3<CAMERA>(cameras, measured, params.rankTolerance,
|
||||
params.enableEPI, params.noiseModel);
|
||||
params.enableEPI, params.noiseModel, params.useLOST);
|
||||
|
||||
// Check landmark distance and re-projection errors to avoid outliers
|
||||
size_t i = 0;
|
||||
|
|
|
|||
|
|
@ -123,6 +123,10 @@ class GTSAM_EXPORT HybridBayesTree : public BayesTree<HybridBayesTreeClique> {
|
|||
};
|
||||
|
||||
/// traits
|
||||
template <>
|
||||
struct traits<HybridBayesTreeClique> : public Testable<HybridBayesTreeClique> {
|
||||
};
|
||||
|
||||
template <>
|
||||
struct traits<HybridBayesTree> : public Testable<HybridBayesTree> {};
|
||||
|
||||
|
|
|
|||
|
|
@ -29,7 +29,8 @@
|
|||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @brief
|
||||
* @brief Incremental Smoothing and Mapping (ISAM) algorithm
|
||||
* for hybrid factor graphs.
|
||||
*
|
||||
* @ingroup hybrid
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -56,8 +56,6 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize(
|
|||
for (auto& f : factors_) {
|
||||
// First check if it is a valid factor
|
||||
if (!f) {
|
||||
// TODO(dellaert): why?
|
||||
linearFG->push_back(GaussianFactor::shared_ptr());
|
||||
continue;
|
||||
}
|
||||
// Check if it is a nonlinear mixture factor
|
||||
|
|
|
|||
|
|
@ -42,8 +42,8 @@
|
|||
#include <functional>
|
||||
#include <iostream>
|
||||
#include <iterator>
|
||||
#include <vector>
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
|
||||
#include "Switching.h"
|
||||
#include "TinyHybridExample.h"
|
||||
|
|
@ -613,11 +613,11 @@ TEST(HybridGaussianFactorGraph, assembleGraphTree) {
|
|||
// Create expected decision tree with two factor graphs:
|
||||
|
||||
// Get mixture factor:
|
||||
auto mixture = std::dynamic_pointer_cast<GaussianMixtureFactor>(fg.at(0));
|
||||
auto mixture = fg.at<GaussianMixtureFactor>(0);
|
||||
CHECK(mixture);
|
||||
|
||||
// Get prior factor:
|
||||
const auto gf = std::dynamic_pointer_cast<HybridConditional>(fg.at(1));
|
||||
const auto gf = fg.at<HybridConditional>(1);
|
||||
CHECK(gf);
|
||||
using GF = GaussianFactor::shared_ptr;
|
||||
const GF prior = gf->asGaussian();
|
||||
|
|
|
|||
|
|
@ -310,6 +310,21 @@ class FactorGraph {
|
|||
*/
|
||||
sharedFactor& at(size_t i) { return factors_.at(i); }
|
||||
|
||||
/** Get a specific factor by index and typecast to factor type F
|
||||
* (this checks array bounds and may throw
|
||||
* an exception, as opposed to operator[] which does not).
|
||||
*/
|
||||
template <typename F>
|
||||
std::shared_ptr<F> at(size_t i) {
|
||||
return std::dynamic_pointer_cast<F>(factors_.at(i));
|
||||
}
|
||||
|
||||
/// Const version of templated `at` method.
|
||||
template <typename F>
|
||||
const std::shared_ptr<F> at(size_t i) const {
|
||||
return std::dynamic_pointer_cast<F>(factors_.at(i));
|
||||
}
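The hybrid test change earlier in this diff already exercises these accessors; as a stand-alone sketch (factor graph and factor type assumed):

    // Typed access replaces the explicit cast:
    //   auto mixture = std::dynamic_pointer_cast<GaussianMixtureFactor>(fg.at(0));
    auto mixture = fg.at<GaussianMixtureFactor>(0);
    if (!mixture) {
      // dynamic_pointer_cast yields nullptr when factor 0 has a different type.
    }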
|
||||
|
||||
/** Get a specific factor by index (this does not check array bounds, as
|
||||
* opposed to at() which does).
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Key.h
|
||||
* @file Key.cpp
|
||||
* @brief
|
||||
* @author Richard Roberts
|
||||
* @author Alex Cunningham
|
||||
|
|
@ -26,6 +26,9 @@ using namespace std;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
/// Assign default key formatter
|
||||
KeyFormatter DefaultKeyFormatter = &_defaultKeyFormatter;
|
||||
|
||||
/* ************************************************************************* */
|
||||
string _defaultKeyFormatter(Key key) {
|
||||
const Symbol asSymbol(key);
|
||||
|
|
|
|||
|
|
@ -37,10 +37,16 @@ using KeyFormatter = std::function<std::string(Key)>;
|
|||
// Helper function for DefaultKeyFormatter
|
||||
GTSAM_EXPORT std::string _defaultKeyFormatter(Key key);
|
||||
|
||||
/// The default KeyFormatter, which is used if no KeyFormatter is passed to
|
||||
/// a nonlinear 'print' function. Automatically detects plain integer keys
|
||||
/// and Symbol keys.
|
||||
static const KeyFormatter DefaultKeyFormatter = &_defaultKeyFormatter;
|
||||
/**
|
||||
* The default KeyFormatter, which is used if no KeyFormatter is passed
|
||||
* to a 'print' function.
|
||||
*
|
||||
* Automatically detects plain integer keys and Symbol keys.
|
||||
*
|
||||
* Marked as `extern` so that it can be updated by external libraries.
|
||||
*
|
||||
*/
|
||||
extern GTSAM_EXPORT KeyFormatter DefaultKeyFormatter;
|
||||
|
||||
// Helper function for Multi-robot Key Formatter
|
||||
GTSAM_EXPORT std::string _multirobotKeyFormatter(gtsam::Key key);
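Because DefaultKeyFormatter is now a mutable extern rather than a static const, a downstream library can install its own formatter at startup; a hedged sketch (the lambda is purely illustrative):

    // Replace the process-wide default formatter before any printing happens.
    gtsam::DefaultKeyFormatter = [](gtsam::Key key) {
      return "k" + std::to_string(key);
    };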
|
||||
|
|
@ -124,7 +130,3 @@ struct traits<Key> {
|
|||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -219,6 +219,11 @@ TEST(Ordering, AppendVector) {
|
|||
|
||||
Ordering expected{X(0), X(1), X(2)};
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
|
||||
actual = Ordering();
|
||||
Ordering addl{X(0), X(1), X(2)};
|
||||
actual += addl;
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -72,8 +72,8 @@ public:
|
|||
GTSAM_EXPORT virtual void print(std::ostream &os) const;
|
||||
|
||||
/* for serialization */
|
||||
friend std::ostream& operator<<(std::ostream &os,
|
||||
const IterativeOptimizationParameters &p);
|
||||
GTSAM_EXPORT friend std::ostream &operator<<(
|
||||
std::ostream &os, const IterativeOptimizationParameters &p);
|
||||
|
||||
GTSAM_EXPORT static Verbosity verbosityTranslator(const std::string &s);
|
||||
GTSAM_EXPORT static std::string verbosityTranslator(Verbosity v);
|
||||
|
|
|
|||
|
|
@ -312,6 +312,12 @@ namespace gtsam {
|
|||
/** Get a view of the A matrix */
|
||||
ABlock getA() { return Ab_.range(0, size()); }
|
||||
|
||||
/**
|
||||
* Get a view of the A matrix for the variable
|
||||
* pointed to by the given key.
|
||||
*/
|
||||
ABlock getA(const Key& key) { return Ab_(find(key) - begin()); }
|
||||
|
||||
/** Update an information matrix by adding the information corresponding to this factor
|
||||
* (used internally during elimination).
|
||||
* @param scatter A mapping from variable index to slot index in this HessianFactor
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@
|
|||
#include <gtsam/linear/LossFunctions.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
|
@ -424,6 +425,132 @@ L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme
|
|||
return shared_ptr(new L2WithDeadZone(k, reweight));
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
// AsymmetricTukey
|
||||
/* ************************************************************************* */
|
||||
|
||||
AsymmetricTukey::AsymmetricTukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
|
||||
if (c <= 0) {
|
||||
throw runtime_error("mEstimator AsymmetricTukey takes only positive double in constructor.");
|
||||
}
|
||||
}
|
||||
|
||||
double AsymmetricTukey::weight(double distance) const {
|
||||
distance = -distance;
|
||||
if (distance >= 0.0) {
|
||||
return 1.0;
|
||||
} else if (distance > -c_) {
|
||||
const double one_minus_xc2 = 1.0 - distance * distance / csquared_;
|
||||
return one_minus_xc2 * one_minus_xc2;
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
double AsymmetricTukey::loss(double distance) const {
|
||||
distance = -distance;
|
||||
if (distance >= 0.0) {
|
||||
return distance * distance / 2.0;
|
||||
} else if (distance >= -c_) {
|
||||
const double one_minus_xc2 = 1.0 - distance * distance / csquared_;
|
||||
const double t = one_minus_xc2 * one_minus_xc2 * one_minus_xc2;
|
||||
return csquared_ * (1 - t) / 6.0;
|
||||
}
|
||||
return csquared_ / 6.0;
|
||||
}
|
||||
|
||||
void AsymmetricTukey::print(const std::string &s="") const {
|
||||
std::cout << s << ": AsymmetricTukey (" << c_ << ")" << std::endl;
|
||||
}
|
||||
|
||||
bool AsymmetricTukey::equals(const Base &expected, double tol) const {
|
||||
const AsymmetricTukey* p = dynamic_cast<const AsymmetricTukey*>(&expected);
|
||||
if (p == nullptr) return false;
|
||||
return std::abs(c_ - p->c_) < tol;
|
||||
}
|
||||
|
||||
AsymmetricTukey::shared_ptr AsymmetricTukey::Create(double c, const ReweightScheme reweight) {
|
||||
return shared_ptr(new AsymmetricTukey(c, reweight));
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
// AsymmetricCauchy
|
||||
/* ************************************************************************* */
|
||||
|
||||
AsymmetricCauchy::AsymmetricCauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
|
||||
if (k <= 0) {
|
||||
throw runtime_error("mEstimator AsymmetricCauchy takes only positive double in constructor.");
|
||||
}
|
||||
}
|
||||
|
||||
double AsymmetricCauchy::weight(double distance) const {
|
||||
distance = -distance;
|
||||
if (distance >= 0.0) {
|
||||
return 1.0;
|
||||
}
|
||||
|
||||
return ksquared_ / (ksquared_ + distance*distance);
|
||||
|
||||
}
|
||||
|
||||
double AsymmetricCauchy::loss(double distance) const {
|
||||
distance = -distance;
|
||||
if (distance >= 0.0) {
|
||||
return distance * distance / 2.0;
|
||||
}
|
||||
const double val = std::log1p(distance * distance / ksquared_);
|
||||
return ksquared_ * val * 0.5;
|
||||
}
|
||||
|
||||
void AsymmetricCauchy::print(const std::string &s="") const {
|
||||
std::cout << s << ": AsymmetricCauchy (" << k_ << ")" << std::endl;
|
||||
}
|
||||
|
||||
bool AsymmetricCauchy::equals(const Base &expected, double tol) const {
|
||||
const AsymmetricCauchy* p = dynamic_cast<const AsymmetricCauchy*>(&expected);
|
||||
if (p == nullptr) return false;
|
||||
return std::abs(k_ - p->k_) < tol;
|
||||
}
|
||||
|
||||
AsymmetricCauchy::shared_ptr AsymmetricCauchy::Create(double k, const ReweightScheme reweight) {
|
||||
return shared_ptr(new AsymmetricCauchy(k, reweight));
|
||||
}
|
||||
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Custom
|
||||
/* ************************************************************************* */
|
||||
|
||||
Custom::Custom(std::function<double(double)> weight, std::function<double(double)> loss, const ReweightScheme reweight,
|
||||
std::string name)
|
||||
: Base(reweight), weight_(std::move(weight)), loss_(loss), name_(std::move(name)) {}
|
||||
|
||||
double Custom::weight(double distance) const {
|
||||
return weight_(distance);
|
||||
}
|
||||
|
||||
double Custom::loss(double distance) const {
|
||||
return loss_(distance);
|
||||
}
|
||||
|
||||
void Custom::print(const std::string &s = "") const {
|
||||
std::cout << s << ": Custom (" << name_ << ")" << std::endl;
|
||||
}
|
||||
|
||||
bool Custom::equals(const Base &expected, double tol) const {
|
||||
const auto *p = dynamic_cast<const Custom *>(&expected);
|
||||
if (p == nullptr)
|
||||
return false;
|
||||
return name_ == p->name_ && weight_.target<double(double)>() == p->weight_.target<double(double)>() &&
|
||||
loss_.target<double(double)>() == p->loss_.target<double(double)>() && reweight_ == p->reweight_;
|
||||
}
|
||||
|
||||
Custom::shared_ptr Custom::Create(std::function<double(double)> weight, std::function<double(double)> loss,
|
||||
const ReweightScheme reweight, const std::string &name) {
|
||||
return std::make_shared<Custom>(std::move(weight), std::move(loss), reweight, name);
|
||||
}
|
||||
|
||||
} // namespace mEstimator
|
||||
} // namespace noiseModel
|
||||
} // gtsam
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
* @date Jan 13, 2010
|
||||
* @author Richard Roberts
|
||||
* @author Frank Dellaert
|
||||
* @author Fan Jiang
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
|
@ -470,6 +471,123 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
|
|||
#endif
|
||||
};
|
||||
|
||||
/** Implementation of the "AsymmetricTukey" robust error model.
|
||||
*
|
||||
* This model has a scalar parameter "c".
|
||||
*
|
||||
 * - The following apply to one side only; the other side is standard L2
|
||||
* - Loss \rho(x) = c² (1 - (1-x²/c²)³)/6 if |x|<c, c²/6 otherwise
|
||||
* - Derivative \phi(x) = x(1-x²/c²)² if |x|<c, 0 otherwise
|
||||
* - Weight w(x) = \phi(x)/x = (1-x²/c²)² if |x|<c, 0 otherwise
|
||||
*/
|
||||
class GTSAM_EXPORT AsymmetricTukey : public Base {
|
||||
protected:
|
||||
double c_, csquared_;
|
||||
|
||||
public:
|
||||
typedef std::shared_ptr<AsymmetricTukey> shared_ptr;
|
||||
|
||||
AsymmetricTukey(double c = 4.6851, const ReweightScheme reweight = Block);
|
||||
double weight(double distance) const override;
|
||||
double loss(double distance) const override;
|
||||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return c_; }
|
||||
|
||||
private:
|
||||
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar &BOOST_SERIALIZATION_NVP(c_);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
/** Implementation of the "AsymmetricCauchy" robust error model.
|
||||
*
|
||||
* This model has a scalar parameter "k".
|
||||
*
|
||||
 * - The following apply to one side only; the other side is standard L2
|
||||
* - Loss \rho(x) = 0.5 k² log(1+x²/k²)
|
||||
* - Derivative \phi(x) = (k²x)/(x²+k²)
|
||||
* - Weight w(x) = \phi(x)/x = k²/(x²+k²)
|
||||
*/
|
||||
class GTSAM_EXPORT AsymmetricCauchy : public Base {
|
||||
protected:
|
||||
double k_, ksquared_;
|
||||
|
||||
public:
|
||||
typedef std::shared_ptr<AsymmetricCauchy> shared_ptr;
|
||||
|
||||
AsymmetricCauchy(double k = 0.1, const ReweightScheme reweight = Block);
|
||||
double weight(double distance) const override;
|
||||
double loss(double distance) const override;
|
||||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
|
||||
double modelParameter() const { return k_; }
|
||||
|
||||
private:
|
||||
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar &BOOST_SERIALIZATION_NVP(k_);
|
||||
ar &BOOST_SERIALIZATION_NVP(ksquared_);
|
||||
}
|
||||
#endif
|
||||
};
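To illustrate how the two new asymmetric estimators are meant to be consumed, here is a small sketch that wraps them in a robust noise model; it assumes the usual `noiseModel::Robust::Create` / `Isotropic::Sigma` API, and the parameter values are arbitrary:

```cpp
#include <gtsam/linear/NoiseModel.h>

// Build robust noise models that are quadratic (L2) on one side of the
// residual and Tukey / Cauchy on the other.
void makeAsymmetricNoiseModels() {
  using namespace gtsam::noiseModel;
  const auto base = Isotropic::Sigma(2, 0.5);  // 2-D measurement noise
  const auto tukeyModel =
      Robust::Create(mEstimator::AsymmetricTukey::Create(4.6851), base);
  const auto cauchyModel =
      Robust::Create(mEstimator::AsymmetricCauchy::Create(0.1), base);
  (void)tukeyModel;
  (void)cauchyModel;
}
```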
|
||||
|
||||
// Type alias for the custom loss and weight functions
|
||||
using CustomLossFunction = std::function<double(double)>;
|
||||
using CustomWeightFunction = std::function<double(double)>;
|
||||
|
||||
/** Implementation of the "Custom" robust error model.
|
||||
*
|
||||
* This model just takes two functions as input, one for the loss and one for the weight.
|
||||
*/
|
||||
class GTSAM_EXPORT Custom : public Base {
|
||||
protected:
|
||||
std::function<double(double)> weight_, loss_;
|
||||
std::string name_;
|
||||
|
||||
public:
|
||||
typedef std::shared_ptr<Custom> shared_ptr;
|
||||
|
||||
Custom(CustomWeightFunction weight, CustomLossFunction loss,
|
||||
const ReweightScheme reweight = Block, std::string name = "Custom");
|
||||
double weight(double distance) const override;
|
||||
double loss(double distance) const override;
|
||||
void print(const std::string &s) const override;
|
||||
bool equals(const Base &expected, double tol = 1e-8) const override;
|
||||
static shared_ptr Create(std::function<double(double)> weight, std::function<double(double)> loss,
|
||||
const ReweightScheme reweight = Block, const std::string &name = "Custom");
|
||||
inline std::string& name() { return name_; }
|
||||
|
||||
inline std::function<double(double)>& weightFunction() { return weight_; }
|
||||
inline std::function<double(double)>& lossFunction() { return loss_; }
|
||||
|
||||
// Default constructor for serialization
|
||||
inline Custom() = default;
|
||||
|
||||
private:
|
||||
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
|
||||
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar &BOOST_SERIALIZATION_NVP(name_);
|
||||
}
|
||||
#endif
|
||||
};
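For completeness, a short sketch of building a `Custom` estimator from two lambdas; the L1-like weight/loss pair and the helper function below are hypothetical and not shipped with this change:

```cpp
#include <gtsam/linear/LossFunctions.h>
#include <cmath>

// Hypothetical: an L1-like m-estimator defined entirely by two lambdas.
gtsam::noiseModel::mEstimator::Custom::shared_ptr makeL1LikeEstimator() {
  using gtsam::noiseModel::mEstimator::Custom;
  const double eps = 1e-9;  // avoid division by zero at the origin
  return Custom::Create(
      [eps](double e) { return 1.0 / (std::abs(e) + eps); },  // weight(e)
      [](double e) { return std::abs(e); },                   // loss(e)
      Custom::Scalar, "L1-like");
}
```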
|
||||
|
||||
} // namespace mEstimator
|
||||
} // namespace noiseModel
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -182,7 +182,7 @@ GaussianFactorGraph buildFactorSubgraph(const GaussianFactorGraph &gfg,
|
|||
|
||||
/** Split the graph into a subgraph and the remaining edges.
|
||||
 * Note that the remaining factor graph has null factors. */
|
||||
std::pair<GaussianFactorGraph, GaussianFactorGraph> splitFactorGraph(
|
||||
std::pair<GaussianFactorGraph, GaussianFactorGraph> GTSAM_EXPORT splitFactorGraph(
|
||||
const GaussianFactorGraph &factorGraph, const Subgraph &subgraph);
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -89,6 +89,7 @@ virtual class Unit : gtsam::noiseModel::Isotropic {
|
|||
|
||||
namespace mEstimator {
|
||||
virtual class Base {
|
||||
enum ReweightScheme { Scalar, Block };
|
||||
void print(string s = "") const;
|
||||
};
|
||||
|
||||
|
|
@ -191,6 +192,36 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
|
|||
double loss(double error) const;
|
||||
};
|
||||
|
||||
virtual class AsymmetricTukey: gtsam::noiseModel::mEstimator::Base {
|
||||
AsymmetricTukey(double k, gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight);
|
||||
static gtsam::noiseModel::mEstimator::AsymmetricTukey* Create(double k);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
double weight(double error) const;
|
||||
double loss(double error) const;
|
||||
};
|
||||
|
||||
virtual class Custom: gtsam::noiseModel::mEstimator::Base {
|
||||
Custom(gtsam::noiseModel::mEstimator::CustomWeightFunction weight,
|
||||
gtsam::noiseModel::mEstimator::CustomLossFunction loss,
|
||||
gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight,
|
||||
std::string name);
|
||||
static gtsam::noiseModel::mEstimator::Custom* Create(
|
||||
gtsam::noiseModel::mEstimator::CustomWeightFunction weight,
|
||||
gtsam::noiseModel::mEstimator::CustomLossFunction loss,
|
||||
gtsam::noiseModel::mEstimator::Base::ReweightScheme reweight,
|
||||
std::string name);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serializable() const;
|
||||
|
||||
double weight(double error) const;
|
||||
double loss(double error) const;
|
||||
};
|
||||
|
||||
|
||||
}///\namespace mEstimator
|
||||
|
||||
virtual class Robust : gtsam::noiseModel::Base {
|
||||
|
|
|
|||
|
|
@ -391,8 +391,7 @@ TEST(GaussianFactorGraph, clone) {
|
|||
EXPECT(assert_equal(init_graph, actCloned)); // Same as the original version
|
||||
|
||||
// Apply an in-place change to init_graph and compare
|
||||
JacobianFactor::shared_ptr jacFactor0 =
|
||||
std::dynamic_pointer_cast<JacobianFactor>(init_graph.at(0));
|
||||
JacobianFactor::shared_ptr jacFactor0 = init_graph.at<JacobianFactor>(0);
|
||||
CHECK(jacFactor0);
|
||||
jacFactor0->getA(jacFactor0->begin()) *= 7.;
|
||||
EXPECT(assert_inequal(init_graph, exp_graph));
|
||||
|
|
|
|||
|
|
@ -65,7 +65,10 @@ TEST(JacobianFactor, constructors_and_accessors)
|
|||
JacobianFactor actual(terms[0].first, terms[0].second, b, noise);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
LONGS_EQUAL((long)terms[0].first, (long)actual.keys().back());
|
||||
// Key iterator
|
||||
EXPECT(assert_equal(terms[0].second, actual.getA(actual.end() - 1)));
|
||||
// Key
|
||||
EXPECT(assert_equal(terms[0].second, actual.getA(terms[0].first)));
|
||||
EXPECT(assert_equal(b, expected.getb()));
|
||||
EXPECT(assert_equal(b, actual.getb()));
|
||||
EXPECT(noise == expected.get_model());
|
||||
|
|
@ -78,7 +81,10 @@ TEST(JacobianFactor, constructors_and_accessors)
|
|||
terms[1].first, terms[1].second, b, noise);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
LONGS_EQUAL((long)terms[1].first, (long)actual.keys().back());
|
||||
// Key iterator
|
||||
EXPECT(assert_equal(terms[1].second, actual.getA(actual.end() - 1)));
|
||||
// Key
|
||||
EXPECT(assert_equal(terms[1].second, actual.getA(terms[1].first)));
|
||||
EXPECT(assert_equal(b, expected.getb()));
|
||||
EXPECT(assert_equal(b, actual.getb()));
|
||||
EXPECT(noise == expected.get_model());
|
||||
|
|
@ -91,7 +97,10 @@ TEST(JacobianFactor, constructors_and_accessors)
|
|||
terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back());
|
||||
// Key iterator
|
||||
EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1)));
|
||||
// Key
|
||||
EXPECT(assert_equal(terms[2].second, actual.getA(terms[2].first)));
|
||||
EXPECT(assert_equal(b, expected.getb()));
|
||||
EXPECT(assert_equal(b, actual.getb()));
|
||||
EXPECT(noise == expected.get_model());
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@
|
|||
* @date Jan 13, 2010
|
||||
* @author Richard Roberts
|
||||
* @author Frank Dellaert
|
||||
* @author Fan Jiang
|
||||
*/
|
||||
|
||||
|
||||
|
|
@ -504,6 +505,22 @@ TEST(NoiseModel, robustFunctionCauchy)
|
|||
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
|
||||
}
|
||||
|
||||
TEST(NoiseModel, robustFunctionAsymmetricCauchy)
|
||||
{
|
||||
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
|
||||
const mEstimator::AsymmetricCauchy::shared_ptr cauchy = mEstimator::AsymmetricCauchy::Create(k);
|
||||
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error1), 1e-8);
|
||||
DOUBLES_EQUAL(0.2000, cauchy->weight(error2), 1e-8);
|
||||
// Negative errors fall on the un-robustified side and keep unit (L2) weight.
|
||||
DOUBLES_EQUAL(1.0, cauchy->weight(error3), 1e-8);
|
||||
DOUBLES_EQUAL(1.0, cauchy->weight(error4), 1e-8);
|
||||
|
||||
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
|
||||
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
|
||||
DOUBLES_EQUAL(50.0, cauchy->loss(error3), 1e-8);
|
||||
DOUBLES_EQUAL(0.5, cauchy->loss(error4), 1e-8);
|
||||
}
|
||||
|
||||
TEST(NoiseModel, robustFunctionGemanMcClure)
|
||||
{
|
||||
const double k = 1.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
|
||||
|
|
@ -551,6 +568,22 @@ TEST(NoiseModel, robustFunctionTukey)
|
|||
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
|
||||
}
|
||||
|
||||
TEST(NoiseModel, robustFunctionAsymmetricTukey)
|
||||
{
|
||||
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
|
||||
const mEstimator::AsymmetricTukey::shared_ptr tukey = mEstimator::AsymmetricTukey::Create(k);
|
||||
DOUBLES_EQUAL(0.9216, tukey->weight(error1), 1e-8);
|
||||
DOUBLES_EQUAL(0.0, tukey->weight(error2), 1e-8);
|
||||
// Negative errors fall on the un-robustified side and keep unit (L2) weight.
|
||||
DOUBLES_EQUAL(1.0, tukey->weight(error3), 1e-8);
|
||||
DOUBLES_EQUAL(1.0, tukey->weight(error4), 1e-8);
|
||||
|
||||
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
|
||||
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
|
||||
DOUBLES_EQUAL(50.0, tukey->loss(error3), 1e-8);
|
||||
DOUBLES_EQUAL(0.5, tukey->loss(error4), 1e-8);
|
||||
}
|
||||
|
||||
TEST(NoiseModel, robustFunctionDCS)
|
||||
{
|
||||
const double k = 1.0, error1 = 1.0, error2 = 10.0;
|
||||
|
|
@ -670,6 +703,35 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
|
|||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(NoiseModel, robustNoiseCustomHuber) {
|
||||
const double k = 10.0, error1 = 1.0, error2 = 100.0;
|
||||
Matrix A = (Matrix(2, 2) << 1.0, 10.0, 100.0, 1000.0).finished();
|
||||
Vector b = Vector2(error1, error2);
|
||||
const Robust::shared_ptr robust =
|
||||
Robust::Create(mEstimator::Custom::Create(
|
||||
[k](double e) {
|
||||
const auto abs_e = std::abs(e);
|
||||
return abs_e <= k ? 1.0 : k / abs_e;
|
||||
},
|
||||
[k](double e) {
|
||||
const auto abs_e = std::abs(e);
|
||||
return abs_e <= k ? abs_e * abs_e / 2.0 : k * abs_e - k * k / 2.0;
|
||||
},
|
||||
noiseModel::mEstimator::Custom::Scalar, "Huber"),
|
||||
Unit::Create(2));
|
||||
|
||||
robust->WhitenSystem(A, b);
|
||||
|
||||
DOUBLES_EQUAL(error1, b(0), 1e-8);
|
||||
DOUBLES_EQUAL(sqrt(k * error2), b(1), 1e-8);
|
||||
|
||||
DOUBLES_EQUAL(1.0, A(0, 0), 1e-8);
|
||||
DOUBLES_EQUAL(10.0, A(0, 1), 1e-8);
|
||||
DOUBLES_EQUAL(sqrt(k * 100.0), A(1, 0), 1e-8);
|
||||
DOUBLES_EQUAL(sqrt(k / 100.0) * 1000.0, A(1, 1), 1e-8);
|
||||
}
|
||||
|
||||
TEST(NoiseModel, lossFunctionAtZero)
|
||||
{
|
||||
const double k = 5.0;
|
||||
|
|
@ -697,6 +759,12 @@ TEST(NoiseModel, lossFunctionAtZero)
|
|||
auto lsdz = mEstimator::L2WithDeadZone::Create(k);
|
||||
DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
|
||||
DOUBLES_EQUAL(lsdz->weight(0), 0, 1e-8);
|
||||
auto assy_cauchy = mEstimator::AsymmetricCauchy::Create(k);
|
||||
DOUBLES_EQUAL(assy_cauchy->loss(0), 0, 1e-8);
|
||||
DOUBLES_EQUAL(assy_cauchy->weight(0), 1, 1e-8);
|
||||
auto assy_tukey = mEstimator::AsymmetricTukey::Create(k);
|
||||
DOUBLES_EQUAL(assy_tukey->loss(0), 0, 1e-8);
|
||||
DOUBLES_EQUAL(assy_tukey->weight(0), 1, 1e-8);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -65,10 +65,9 @@ class GncOptimizer {
|
|||
nfg_.resize(graph.size());
|
||||
for (size_t i = 0; i < graph.size(); i++) {
|
||||
if (graph[i]) {
|
||||
NoiseModelFactor::shared_ptr factor = std::dynamic_pointer_cast<
|
||||
NoiseModelFactor>(graph[i]);
|
||||
auto robust = std::dynamic_pointer_cast<
|
||||
noiseModel::Robust>(factor->noiseModel());
|
||||
NoiseModelFactor::shared_ptr factor = graph.at<NoiseModelFactor>(i);
|
||||
auto robust =
|
||||
std::dynamic_pointer_cast<noiseModel::Robust>(factor->noiseModel());
|
||||
// if the factor has a robust loss, we remove the robust loss
|
||||
nfg_[i] = robust ? factor-> cloneWithNewNoiseModel(robust->noise()) : factor;
|
||||
}
|
||||
|
|
@ -401,10 +400,8 @@ class GncOptimizer {
|
|||
newGraph.resize(nfg_.size());
|
||||
for (size_t i = 0; i < nfg_.size(); i++) {
|
||||
if (nfg_[i]) {
|
||||
auto factor = std::dynamic_pointer_cast<
|
||||
NoiseModelFactor>(nfg_[i]);
|
||||
auto noiseModel =
|
||||
std::dynamic_pointer_cast<noiseModel::Gaussian>(
|
||||
auto factor = nfg_.at<NoiseModelFactor>(i);
|
||||
auto noiseModel = std::dynamic_pointer_cast<noiseModel::Gaussian>(
|
||||
factor->noiseModel());
|
||||
if (noiseModel) {
|
||||
Matrix newInfo = weights[i] * noiseModel->information();
|
||||
|
|
|
|||
|
|
@ -556,9 +556,12 @@ void ISAM2::marginalizeLeaves(
|
|||
// We do not need the marginal factors associated with this clique
|
||||
// because their information is already incorporated in the new
|
||||
// marginal factor. So, now associate this marginal factor with the
|
||||
// parent of this clique.
|
||||
// parent of this clique. If the clique is a root and has no parent, then
|
||||
// we can discard it without keeping track of the marginal factor.
|
||||
if (clique->parent()) {
|
||||
marginalFactors[clique->parent()->conditional()->front()].push_back(
|
||||
marginalFactor);
|
||||
}
|
||||
// Now remove this clique and its subtree - all of its marginal
|
||||
// information has been stored in marginalFactors.
|
||||
trackingRemoveSubtree(clique);
|
||||
|
|
@ -636,7 +639,7 @@ void ISAM2::marginalizeLeaves(
|
|||
|
||||
// Make the clique's matrix appear as a subset
|
||||
const DenseIndex dimToRemove = cg->matrixObject().offset(nToRemove);
|
||||
cg->matrixObject().firstBlock() = nToRemove;
|
||||
cg->matrixObject().firstBlock() += nToRemove;
|
||||
cg->matrixObject().rowStart() = dimToRemove;
|
||||
|
||||
// Change the keys in the clique
|
||||
|
|
@ -662,42 +665,55 @@ void ISAM2::marginalizeLeaves(
|
|||
// At this point we have updated the BayesTree, now update the remaining iSAM2
|
||||
// data structures
|
||||
|
||||
// Remove the factors to remove that will be summarized in marginal factors
|
||||
NonlinearFactorGraph removedFactors;
|
||||
for (const auto index : factorIndicesToRemove) {
|
||||
removedFactors.push_back(nonlinearFactors_[index]);
|
||||
nonlinearFactors_.remove(index);
|
||||
if (params_.cacheLinearizedFactors) {
|
||||
linearFactors_.remove(index);
|
||||
}
|
||||
}
|
||||
variableIndex_.remove(factorIndicesToRemove.begin(),
|
||||
factorIndicesToRemove.end(), removedFactors);
|
||||
|
||||
// Gather factors to add - the new marginal factors
|
||||
GaussianFactorGraph factorsToAdd;
|
||||
GaussianFactorGraph factorsToAdd{};
|
||||
NonlinearFactorGraph nonlinearFactorsToAdd{};
|
||||
for (const auto& key_factors : marginalFactors) {
|
||||
for (const auto& factor : key_factors.second) {
|
||||
if (factor) {
|
||||
factorsToAdd.push_back(factor);
|
||||
if (marginalFactorsIndices)
|
||||
marginalFactorsIndices->push_back(nonlinearFactors_.size());
|
||||
nonlinearFactors_.push_back(
|
||||
std::make_shared<LinearContainerFactor>(factor));
|
||||
if (params_.cacheLinearizedFactors) linearFactors_.push_back(factor);
|
||||
nonlinearFactorsToAdd.emplace_shared<LinearContainerFactor>(factor);
|
||||
for (Key factorKey : *factor) {
|
||||
fixedVariables_.insert(factorKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
variableIndex_.augment(factorsToAdd); // Augment the variable index
|
||||
|
||||
// Remove the factors to remove that have been summarized in the newly-added
|
||||
// marginal factors
|
||||
NonlinearFactorGraph removedFactors;
|
||||
for (const auto index : factorIndicesToRemove) {
|
||||
removedFactors.push_back(nonlinearFactors_[index]);
|
||||
nonlinearFactors_.remove(index);
|
||||
if (params_.cacheLinearizedFactors) linearFactors_.remove(index);
|
||||
// Add the nonlinear factors and keep track of the new factor indices
|
||||
auto newFactorIndices = nonlinearFactors_.add_factors(nonlinearFactorsToAdd,
|
||||
params_.findUnusedFactorSlots);
|
||||
// Add cached linear factors.
|
||||
if (params_.cacheLinearizedFactors){
|
||||
linearFactors_.resize(nonlinearFactors_.size());
|
||||
for (std::size_t i = 0; i < nonlinearFactorsToAdd.size(); ++i){
|
||||
linearFactors_[newFactorIndices[i]] = factorsToAdd[i];
|
||||
}
|
||||
variableIndex_.remove(factorIndicesToRemove.begin(),
|
||||
factorIndicesToRemove.end(), removedFactors);
|
||||
|
||||
if (deletedFactorsIndices)
|
||||
deletedFactorsIndices->assign(factorIndicesToRemove.begin(),
|
||||
factorIndicesToRemove.end());
|
||||
}
|
||||
// Augment the variable index
|
||||
variableIndex_.augment(factorsToAdd, newFactorIndices);
|
||||
|
||||
// Remove the marginalized variables
|
||||
removeVariables(KeySet(leafKeys.begin(), leafKeys.end()));
|
||||
|
||||
if (deletedFactorsIndices) {
|
||||
deletedFactorsIndices->assign(factorIndicesToRemove.begin(),
|
||||
factorIndicesToRemove.end());
|
||||
}
|
||||
if (marginalFactorsIndices){
|
||||
*marginalFactorsIndices = std::move(newFactorIndices);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -345,7 +345,7 @@ class GTSAM_EXPORT ISAM2 : public BayesTree<ISAM2Clique> {
|
|||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
|
||||
ar & boost::serialization::base_object<BayesTree<ISAM2Clique> >(*this);
|
||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar & BOOST_SERIALIZATION_NVP(theta_);
|
||||
ar & BOOST_SERIALIZATION_NVP(variableIndex_);
|
||||
ar & BOOST_SERIALIZATION_NVP(delta_);
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ namespace gtsam {
|
|||
 * We had to use a static_cast of nullptr because the compiler is not able to
|
||||
* deduce the type of the nullptr when expanding the evaluateError templates.
|
||||
*/
|
||||
#define OptionalNone static_cast<Matrix*>(nullptr)
|
||||
#define OptionalNone static_cast<gtsam::Matrix*>(nullptr)
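A hedged sketch of a call site that relies on this macro; it assumes the 4.x `Matrix*`-based Jacobian arguments of `evaluateError`, and the factor type, values, and helper function are arbitrary examples:

```cpp
#include <gtsam/geometry/Pose2.h>
#include <gtsam/slam/BetweenFactor.h>

// Evaluate a factor's error without requesting Jacobians. OptionalNone
// expands to static_cast<gtsam::Matrix*>(nullptr), so the typed null pointer
// matches the Matrix* parameters and no Jacobians are computed.
gtsam::Vector errorOnly(const gtsam::BetweenFactor<gtsam::Pose2>& factor,
                        const gtsam::Pose2& a, const gtsam::Pose2& b) {
  return factor.evaluateError(a, b, OptionalNone, OptionalNone);
}
```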
|
||||
|
||||
/** This typedef will be used everywhere boost::optional<Matrix&> reference was used
|
||||
* previously. This is used to indicate that the Jacobian is optional. In the future
|
||||
|
|
|
|||
|
|
@ -86,7 +86,10 @@ class NonlinearFactorGraph {
|
|||
const gtsam::noiseModel::Base* noiseModel);
|
||||
|
||||
// NonlinearFactorGraph
|
||||
void printErrors(const gtsam::Values& values) const;
|
||||
void printErrors(const gtsam::Values& values,
|
||||
const string& str = "NonlinearFactorGraph: ",
|
||||
const gtsam::KeyFormatter& keyFormatter =
|
||||
gtsam::DefaultKeyFormatter) const;
|
||||
double error(const gtsam::Values& values) const;
|
||||
double probPrime(const gtsam::Values& values) const;
|
||||
gtsam::Ordering orderingCOLAMD() const;
|
||||
|
|
|
|||
|
|
@ -183,7 +183,7 @@ TEST(Serialization, ISAM2) {
|
|||
|
||||
std::string binaryPath = "saved_solver.dat";
|
||||
try {
|
||||
std::ofstream outputStream(binaryPath);
|
||||
std::ofstream outputStream(binaryPath, std::ios::out | std::ios::binary);
|
||||
boost::archive::binary_oarchive outputArchive(outputStream);
|
||||
outputArchive << solver;
|
||||
} catch(...) {
|
||||
|
|
@ -192,7 +192,7 @@ TEST(Serialization, ISAM2) {
|
|||
|
||||
gtsam::ISAM2 solverFromDisk;
|
||||
try {
|
||||
std::ifstream ifs(binaryPath);
|
||||
std::ifstream ifs(binaryPath, std::ios::in | std::ios::binary);
|
||||
boost::archive::binary_iarchive inputArchive(ifs);
|
||||
inputArchive >> solverFromDisk;
|
||||
} catch(...) {
|
||||
|
|
|
|||
|
|
@ -581,7 +581,7 @@ TEST(Values, std_move) {
|
|||
TEST(Values, VectorDynamicInsertFixedRead) {
|
||||
Values values;
|
||||
Vector v(3); v << 5.0, 6.0, 7.0;
|
||||
values.insert(key1, v);
|
||||
values.insert<Vector3>(key1, v);
|
||||
Vector3 expected(5.0, 6.0, 7.0);
|
||||
Vector3 actual = values.at<Vector3>(key1);
|
||||
CHECK(assert_equal(expected, actual));
|
||||
|
|
@ -629,7 +629,7 @@ TEST(Values, VectorFixedInsertFixedRead) {
|
|||
TEST(Values, MatrixDynamicInsertFixedRead) {
|
||||
Values values;
|
||||
Matrix v(1,3); v << 5.0, 6.0, 7.0;
|
||||
values.insert(key1, v);
|
||||
values.insert<Matrix13>(key1, v);
|
||||
Vector3 expected(5.0, 6.0, 7.0);
|
||||
CHECK(assert_equal((Vector)expected, values.at<Matrix13>(key1)));
|
||||
CHECK_EXCEPTION(values.at<Matrix23>(key1), exception);
|
||||
|
|
@ -639,7 +639,15 @@ TEST(Values, Demangle) {
|
|||
Values values;
|
||||
Matrix13 v; v << 5.0, 6.0, 7.0;
|
||||
values.insert(key1, v);
|
||||
string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix<double, 1, 3, 1, 1, 3>)\n[\n 5, 6, 7\n]\n\n";
|
||||
#ifdef __GNUG__
|
||||
string expected =
|
||||
"Values with 1 values:\nValue v1: (Eigen::Matrix<double, 1, 3, 1, 1, "
|
||||
"3>)\n[\n 5, 6, 7\n]\n\n";
|
||||
#elif _WIN32
|
||||
string expected =
|
||||
"Values with 1 values:\nValue v1: "
|
||||
"(class Eigen::Matrix<double,1,3,1,1,3>)\n[\n 5, 6, 7\n]\n\n";
|
||||
#endif
|
||||
|
||||
EXPECT(assert_print_equal(expected, values));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,10 +59,12 @@ class Values {
|
|||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
// New in 4.0, we have to specialize every insert/update/at to generate
|
||||
// wrappers Instead of the old: void insert(size_t j, const gtsam::Value&
|
||||
// value); void update(size_t j, const gtsam::Value& val); gtsam::Value
|
||||
// at(size_t j) const;
|
||||
// New in 4.0, we have to specialize every insert/update/at
|
||||
// to generate wrappers.
|
||||
// Instead of the old:
|
||||
// void insert(size_t j, const gtsam::Value& value);
|
||||
// void update(size_t j, const gtsam::Value& val);
|
||||
// gtsam::Value at(size_t j) const;
|
||||
|
||||
// The order is important: Vector has to precede Point2/Point3 so `atVector`
|
||||
// can work for those fixed-size vectors.
|
||||
|
|
@ -99,6 +101,10 @@ class Values {
|
|||
template <T = {gtsam::Point2, gtsam::Point3}>
|
||||
void insert(size_t j, const T& val);
|
||||
|
||||
// The order is important: Vector has to precede Point2/Point3 so `atVector`
|
||||
// can work for those fixed-size vectors.
|
||||
void update(size_t j, Vector vector);
|
||||
void update(size_t j, Matrix matrix);
|
||||
void update(size_t j, const gtsam::Point2& point2);
|
||||
void update(size_t j, const gtsam::Point3& point3);
|
||||
void update(size_t j, const gtsam::Rot2& rot2);
|
||||
|
|
@ -125,10 +131,12 @@ class Values {
|
|||
void update(size_t j, const gtsam::PinholePose<gtsam::Cal3Unified>& camera);
|
||||
void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias);
|
||||
void update(size_t j, const gtsam::NavState& nav_state);
|
||||
void update(size_t j, Vector vector);
|
||||
void update(size_t j, Matrix matrix);
|
||||
void update(size_t j, double c);
|
||||
|
||||
// The order is important: Vector has to precede Point2/Point3 so `atVector`
|
||||
// can work for those fixed-size vectors.
|
||||
void insert_or_assign(size_t j, Vector vector);
|
||||
void insert_or_assign(size_t j, Matrix matrix);
|
||||
void insert_or_assign(size_t j, const gtsam::Point2& point2);
|
||||
void insert_or_assign(size_t j, const gtsam::Point3& point3);
|
||||
void insert_or_assign(size_t j, const gtsam::Rot2& rot2);
|
||||
|
|
@ -165,8 +173,6 @@ class Values {
|
|||
void insert_or_assign(size_t j,
|
||||
const gtsam::imuBias::ConstantBias& constant_bias);
|
||||
void insert_or_assign(size_t j, const gtsam::NavState& nav_state);
|
||||
void insert_or_assign(size_t j, Vector vector);
|
||||
void insert_or_assign(size_t j, Matrix matrix);
|
||||
void insert_or_assign(size_t j, double c);
|
||||
|
||||
template <T = {gtsam::Point2,
|
||||
|
|
|
|||
|
|
@ -130,7 +130,7 @@ std::vector<SfmTrack2d> tracksFromPairwiseMatches(
|
|||
}
|
||||
|
||||
// TODO(johnwlambert): return the Transitivity failure percentage here.
|
||||
return tracks2d;
|
||||
return validTracks;
|
||||
}
|
||||
|
||||
} // namespace gtsfm
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ namespace gtsam {
|
|||
// where s is an arbitrary scale that can be supplied, default 1.0. Hence, two
|
||||
// versions are supplied below corresponding to whether we have initial values
|
||||
// or not.
|
||||
class TranslationRecovery {
|
||||
class GTSAM_EXPORT TranslationRecovery {
|
||||
public:
|
||||
using KeyPair = std::pair<Key, Key>;
|
||||
using TranslationEdges = std::vector<BinaryMeasurement<Unit3>>;
|
||||
|
|
|
|||
|
|
@ -373,8 +373,10 @@ TEST(ShonanAveraging2, noisyToyGraphWithHuber) {
|
|||
// test that each factor is actually robust
|
||||
for (size_t i=0; i<=4; i++) { // note: last is the Gauge factor and is not robust
|
||||
const auto &robust = std::dynamic_pointer_cast<noiseModel::Robust>(
|
||||
std::dynamic_pointer_cast<NoiseModelFactor>(graph[i])->noiseModel());
|
||||
EXPECT(robust); // we expect the factors to be use a robust noise model (in particular, Huber)
|
||||
graph.at<NoiseModelFactor>(i)->noiseModel());
|
||||
// we expect the factors to use a robust noise model
|
||||
// (in particular, Huber)
|
||||
EXPECT(robust);
|
||||
}
|
||||
|
||||
// test result
|
||||
|
|
|
|||
|
|
@ -162,8 +162,9 @@ protected:
|
|||
/// Collect all cameras: important that in key order.
|
||||
virtual Cameras cameras(const Values& values) const {
|
||||
Cameras cameras;
|
||||
for(const Key& k: this->keys_)
|
||||
for(const Key& k: this->keys_) {
|
||||
cameras.push_back(values.at<CAMERA>(k));
|
||||
}
|
||||
return cameras;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -155,10 +155,10 @@ Point2_ project2(const Expression<CAMERA>& camera_, const Expression<POINT>& p_)
|
|||
namespace internal {
|
||||
// Helper template for project3 expression below
|
||||
template <class CALIBRATION, class POINT>
|
||||
inline Point2 project6(const Pose3& x, const Point3& p, const Cal3_S2& K,
|
||||
inline Point2 project6(const Pose3& x, const POINT& p, const CALIBRATION& K,
|
||||
OptionalJacobian<2, 6> Dpose, OptionalJacobian<2, 3> Dpoint,
|
||||
OptionalJacobian<2, 5> Dcal) {
|
||||
return PinholeCamera<Cal3_S2>(x, K).project(p, Dpose, Dpoint, Dcal);
|
||||
OptionalJacobian<2, CALIBRATION::dimension> Dcal) {
|
||||
return PinholeCamera<CALIBRATION>(x, K).project(p, Dpose, Dpoint, Dcal);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -156,6 +156,9 @@ virtual class SmartProjectionPoseFactor : gtsam::NonlinearFactor {
|
|||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
||||
gtsam::TriangulationResult point() const;
|
||||
gtsam::TriangulationResult point(const gtsam::Values& values) const;
|
||||
};
|
||||
|
||||
typedef gtsam::SmartProjectionPoseFactor<gtsam::Cal3_S2>
|
||||
|
|
|
|||
|
|
@ -97,8 +97,7 @@ TEST(dataSet, load2D) {
|
|||
auto model = noiseModel::Unit::Create(3);
|
||||
BetweenFactor<Pose2> expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381),
|
||||
model);
|
||||
BetweenFactor<Pose2>::shared_ptr actual =
|
||||
std::dynamic_pointer_cast<BetweenFactor<Pose2>>(graph->at(0));
|
||||
BetweenFactor<Pose2>::shared_ptr actual = graph->at<BetweenFactor<Pose2>>(0);
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
|
||||
// Check binary measurements, Pose2
|
||||
|
|
@ -113,9 +112,8 @@ TEST(dataSet, load2D) {
|
|||
// // Check factor parsing
|
||||
const auto actualFactors = parseFactors<Pose2>(filename);
|
||||
for (size_t i : {0, 1, 2, 3, 4, 5}) {
|
||||
EXPECT(assert_equal(
|
||||
*std::dynamic_pointer_cast<BetweenFactor<Pose2>>(graph->at(i)),
|
||||
*actualFactors[i], 1e-5));
|
||||
EXPECT(assert_equal(*graph->at<BetweenFactor<Pose2>>(i), *actualFactors[i],
|
||||
1e-5));
|
||||
}
|
||||
|
||||
// Check pose parsing
|
||||
|
|
@ -194,8 +192,7 @@ TEST(dataSet, readG2o3D) {
|
|||
// Check factor parsing
|
||||
const auto actualFactors = parseFactors<Pose3>(g2oFile);
|
||||
for (size_t i : {0, 1, 2, 3, 4, 5}) {
|
||||
EXPECT(assert_equal(
|
||||
*std::dynamic_pointer_cast<BetweenFactor<Pose3>>(expectedGraph[i]),
|
||||
EXPECT(assert_equal(*expectedGraph.at<BetweenFactor<Pose3>>(i),
|
||||
*actualFactors[i], 1e-5));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ if(GTSAM_SUPPORT_NESTED_DISSECTION) # Only build partition if metis is built
|
|||
endif(GTSAM_SUPPORT_NESTED_DISSECTION)
|
||||
|
||||
# Versions - same as core gtsam library
|
||||
set(gtsam_unstable_version ${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH})
|
||||
set(gtsam_unstable_version ${GTSAM_VERSION_STRING})
|
||||
set(gtsam_unstable_soversion ${GTSAM_VERSION_MAJOR})
|
||||
message(STATUS "GTSAM_UNSTABLE Version: ${gtsam_unstable_version}")
|
||||
message(STATUS "Install prefix: ${CMAKE_INSTALL_PREFIX}")
|
||||
|
|
|
|||
|
|
@ -12,4 +12,4 @@ endif()
|
|||
|
||||
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable")
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
|
||||
|
|
|
|||
|
|
@ -183,13 +183,10 @@ class LoopyBelief {
|
|||
// accumulate unary factors
|
||||
if (graph.at(factorIndex)->size() == 1) {
|
||||
if (!prodOfUnaries)
|
||||
prodOfUnaries = std::dynamic_pointer_cast<DecisionTreeFactor>(
|
||||
graph.at(factorIndex));
|
||||
prodOfUnaries = graph.at<DecisionTreeFactor>(factorIndex);
|
||||
else
|
||||
prodOfUnaries = std::make_shared<DecisionTreeFactor>(
|
||||
*prodOfUnaries *
|
||||
(*std::dynamic_pointer_cast<DecisionTreeFactor>(
|
||||
graph.at(factorIndex))));
|
||||
*prodOfUnaries * (*graph.at<DecisionTreeFactor>(factorIndex)));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -9,4 +9,4 @@ if (NOT GTSAM_USE_BOOST_FEATURES)
|
|||
endif()
|
||||
|
||||
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable")
|
||||
gtsamAddExamplesGlob("*.cpp" "${excluded_examples}" "gtsam_unstable" ${GTSAM_BUILD_EXAMPLES_ALWAYS})
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ namespace gtsam {
|
|||
* SLAM, where we have "time of arrival" measurements at a set of sensors. The
|
||||
* TOA functor below provides a measurement function for those applications.
|
||||
*/
|
||||
class Event {
|
||||
class GTSAM_UNSTABLE_EXPORT Event {
|
||||
double time_; ///< Time event was generated
|
||||
Point3 location_; ///< Location at time event was generated
|
||||
|
||||
|
|
@ -62,10 +62,10 @@ class Event {
|
|||
}
|
||||
|
||||
/** print with optional string */
|
||||
GTSAM_UNSTABLE_EXPORT void print(const std::string& s = "") const;
|
||||
void print(const std::string& s = "") const;
|
||||
|
||||
/** equals with an tolerance */
|
||||
GTSAM_UNSTABLE_EXPORT bool equals(const Event& other,
|
||||
bool equals(const Event& other,
|
||||
double tol = 1e-9) const;
|
||||
|
||||
/// Updates a with tangent space delta
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ namespace gtsam {
|
|||
using namespace std;
|
||||
|
||||
/// Mapping between variable's key and its corresponding dimensionality
|
||||
using KeyDimMap = std::map<Key, size_t>;
|
||||
using KeyDimMap = std::map<Key, uint32_t>;
|
||||
/*
|
||||
* Iterates through every factor in a linear graph and generates a
|
||||
 * mapping between every factor key and its corresponding dimensionality.
|
||||
|
|
|
|||
|
|
@ -65,9 +65,7 @@ GaussianFactorGraph::shared_ptr LPInitSolver::buildInitOfInitGraph() const {
|
|||
new GaussianFactorGraph(lp_.equalities));
|
||||
|
||||
// create factor ||x||^2 and add to the graph
|
||||
const KeyDimMap& constrainedKeyDim = lp_.constrainedKeyDimMap();
|
||||
for (const auto& [key, _] : constrainedKeyDim) {
|
||||
size_t dim = constrainedKeyDim.at(key);
|
||||
for (const auto& [key, dim] : lp_.constrainedKeyDimMap()) {
|
||||
initGraph->push_back(
|
||||
JacobianFactor(key, Matrix::Identity(dim, dim), Vector::Zero(dim)));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -87,7 +87,7 @@ TEST(NonlinearClusterTree, Clusters) {
|
|||
Ordering ordering;
|
||||
ordering.push_back(x1);
|
||||
const auto [bn, fg] = gfg->eliminatePartialSequential(ordering);
|
||||
auto expectedFactor = std::dynamic_pointer_cast<HessianFactor>(fg->at(0));
|
||||
auto expectedFactor = fg->at<HessianFactor>(0);
|
||||
if (!expectedFactor)
|
||||
throw std::runtime_error("Expected HessianFactor");
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ namespace gtsam { namespace partition {
|
|||
* whether node j is in the left part of the graph, the right part, or the
|
||||
* separator, respectively
|
||||
*/
|
||||
std::pair<int, sharedInts> separatorMetis(idx_t n, const sharedInts& xadj,
|
||||
std::pair<idx_t, sharedInts> separatorMetis(idx_t n, const sharedInts& xadj,
|
||||
const sharedInts& adjncy, const sharedInts& adjwgt, bool verbose) {
|
||||
|
||||
// control parameters
|
||||
|
|
@ -277,7 +277,7 @@ namespace gtsam { namespace partition {
|
|||
//throw runtime_error("separatorPartitionByMetis:stop for debug");
|
||||
}
|
||||
|
||||
if(result.C.size() != sepsize) {
|
||||
if(result.C.size() != size_t(sepsize)) {
|
||||
std::cout << "total key: " << keys.size()
|
||||
<< " result(A,B,C) = " << result.A.size() << ", " << result.B.size() << ", " << result.C.size()
|
||||
<< "; sepsize from Metis = " << sepsize << std::endl;
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@
|
|||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <gtsam_unstable/dllexport.h>
|
||||
|
||||
#include "PartitionWorkSpace.h"
|
||||
|
||||
|
|
@ -49,7 +50,7 @@ namespace gtsam { namespace partition {
|
|||
typedef std::vector<sharedGenericFactor2D> GenericGraph2D;
|
||||
|
||||
/** merge nodes in DSF using constraints captured by the given graph */
|
||||
std::list<std::vector<size_t> > findIslands(const GenericGraph2D& graph, const std::vector<size_t>& keys, WorkSpace& workspace,
|
||||
std::list<std::vector<size_t> > GTSAM_UNSTABLE_EXPORT findIslands(const GenericGraph2D& graph, const std::vector<size_t>& keys, WorkSpace& workspace,
|
||||
const int minNrConstraintsPerCamera, const int minNrConstraintsPerLandmark);
|
||||
|
||||
/** eliminate the sensors from generic graph */
|
||||
|
|
@ -97,11 +98,11 @@ namespace gtsam { namespace partition {
|
|||
typedef std::vector<sharedGenericFactor3D> GenericGraph3D;
|
||||
|
||||
/** merge nodes in DSF using constraints captured by the given graph */
|
||||
std::list<std::vector<size_t> > findIslands(const GenericGraph3D& graph, const std::vector<size_t>& keys, WorkSpace& workspace,
|
||||
std::list<std::vector<size_t> > GTSAM_UNSTABLE_EXPORT findIslands(const GenericGraph3D& graph, const std::vector<size_t>& keys, WorkSpace& workspace,
|
||||
const size_t minNrConstraintsPerCamera, const size_t minNrConstraintsPerLandmark);
|
||||
|
||||
/** eliminate the sensors from generic graph */
|
||||
void reduceGenericGraph(const GenericGraph3D& graph, const std::vector<size_t>& cameraKeys, const std::vector<size_t>& landmarkKeys,
|
||||
void GTSAM_UNSTABLE_EXPORT reduceGenericGraph(const GenericGraph3D& graph, const std::vector<size_t>& cameraKeys, const std::vector<size_t>& landmarkKeys,
|
||||
const std::vector<int>& dictionary, GenericGraph3D& reducedGraph);
|
||||
|
||||
/** check whether the 3D graph is singular (under constrained) */
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
set(ignore_test "testNestedDissection.cpp")
|
||||
|
||||
if (NOT GTSAM_USE_BOOST_FEATURES)
|
||||
if (NOT GTSAM_USE_BOOST_FEATURES OR MSVC)
|
||||
list(APPEND ignore_test "testFindSeparator.cpp")
|
||||
endif()
|
||||
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ namespace gtsam {
|
|||
* @ingroup slam
|
||||
*/
|
||||
template <class POSE, class LANDMARK, class CALIBRATION = Cal3_S2>
|
||||
class GTSAM_UNSTABLE_EXPORT ProjectionFactorPPPC
|
||||
class ProjectionFactorPPPC
|
||||
: public NoiseModelFactorN<POSE, POSE, LANDMARK, CALIBRATION> {
|
||||
protected:
|
||||
Point2 measured_; ///< 2D measurement
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ namespace gtsam {
|
|||
* @ingroup slam
|
||||
*/
|
||||
template <class CAMERA>
|
||||
class GTSAM_UNSTABLE_EXPORT SmartProjectionPoseFactorRollingShutter
|
||||
class SmartProjectionPoseFactorRollingShutter
|
||||
: public SmartProjectionFactor<CAMERA> {
|
||||
private:
|
||||
typedef SmartProjectionFactor<CAMERA> Base;
|
||||
|
|
|
|||
|
|
@ -202,9 +202,8 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
|
|||
|
||||
/// linearize and return a Hessianfactor that is an approximation of error(p)
|
||||
std::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor(
|
||||
const Values& values, const double lambda = 0.0, bool diagonalDamping =
|
||||
false) const {
|
||||
|
||||
const Values& values, const double lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
// we may have multiple cameras sharing the same extrinsic calibration, hence the number
|
||||
// of keys may be smaller than 2 * nrMeasurements (which is the upper bound where we
|
||||
// have a body key and an extrinsic calibration key for each measurement)
|
||||
|
|
@ -222,13 +221,12 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
|
|||
// triangulate 3D point at given linearization point
|
||||
triangulateSafe(cameras(values));
|
||||
|
||||
if (!result_) { // failed: return "empty/zero" Hessian
|
||||
for (Matrix& m : Gs)
|
||||
m = Matrix::Zero(DimPose, DimPose);
|
||||
for (Vector& v : gs)
|
||||
v = Vector::Zero(DimPose);
|
||||
return std::make_shared < RegularHessianFactor<DimPose>
|
||||
> (keys_, Gs, gs, 0.0);
|
||||
// failed: return "empty/zero" Hessian
|
||||
if (!result_) {
|
||||
for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
|
||||
for (Vector& v : gs) v = Vector::Zero(DimPose);
|
||||
return std::make_shared<RegularHessianFactor<DimPose>>(keys_, Gs, gs,
|
||||
0.0);
|
||||
}
|
||||
|
||||
// compute Jacobian given triangulated 3D Point
|
||||
|
|
@ -239,8 +237,9 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
|
|||
|
||||
// Whiten using noise model
|
||||
noiseModel_->WhitenSystem(E, b);
|
||||
for (size_t i = 0; i < Fs.size(); i++)
|
||||
for (size_t i = 0; i < Fs.size(); i++) {
|
||||
Fs[i] = noiseModel_->Whiten(Fs[i]);
|
||||
}
|
||||
|
||||
// build augmented Hessian (with last row/column being the information vector)
|
||||
Matrix3 P;
|
||||
|
|
@ -254,11 +253,12 @@ class GTSAM_UNSTABLE_EXPORT SmartStereoProjectionFactorPP
|
|||
}
|
||||
// but we need to get the augumented hessian wrt the unique keys in key_
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys =
|
||||
Cameras::SchurComplementAndRearrangeBlocks<3,DimBlock,DimPose>(Fs,E,P,b,
|
||||
nonuniqueKeys, keys_);
|
||||
Base::Cameras::template SchurComplementAndRearrangeBlocks<3, DimBlock,
|
||||
DimPose>(
|
||||
Fs, E, P, b, nonuniqueKeys, keys_);
|
||||
|
||||
return std::make_shared < RegularHessianFactor<DimPose>
|
||||
> (keys_, augmentedHessianUniqueKeys);
|
||||
return std::make_shared<RegularHessianFactor<DimPose>>(
|
||||
keys_, augmentedHessianUniqueKeys);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -1441,14 +1441,14 @@ TEST( SmartStereoProjectionPoseFactor, HessianWithRotationNonDegenerate ) {
|
|||
std::shared_ptr<GaussianFactor> hessianFactorRotTran =
|
||||
smartFactor->linearize(tranValues);
|
||||
|
||||
// Hessian is invariant to rotations and translations in the degenerate case
|
||||
EXPECT(
|
||||
assert_equal(hessianFactor->information(),
|
||||
double error;
|
||||
#ifdef GTSAM_USE_EIGEN_MKL
|
||||
hessianFactorRotTran->information(), 1e-5));
|
||||
error = 1e-5;
|
||||
#else
|
||||
hessianFactorRotTran->information(), 1e-6));
|
||||
error = 1e-6;
|
||||
#endif
|
||||
// Hessian is invariant to rotations and translations in the degenerate case
|
||||
EXPECT(assert_equal(hessianFactor->information(), hessianFactorRotTran->information(), error));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
gtsamAddTimingGlob("*.cpp" "" "gtsam_unstable")
|
||||
gtsamAddTimingGlob("*.cpp" "" "gtsam_unstable" ${GTSAM_BUILD_TIMING_ALWAYS})
|
||||
|
|
|
|||
|
|
@ -64,6 +64,10 @@ set(ignore
|
|||
gtsam::Point3
|
||||
gtsam::CustomFactor)
|
||||
|
||||
if (APPLE OR WIN32)
|
||||
list(APPEND ignore gtsam::Symbol)
|
||||
endif()
|
||||
|
||||
set(interface_files
|
||||
${GTSAM_SOURCE_DIR}/gtsam/gtsam.i
|
||||
${GTSAM_SOURCE_DIR}/gtsam/base/base.i
|
||||
|
|
|
|||
|
|
@ -10,6 +10,14 @@ The interface generated by the wrap tool also redirects the standard output stre
|
|||
|
||||
For instructions on updating the version of the [wrap library](https://github.com/borglab/wrap) included in GTSAM to the latest version, please refer to the [wrap README](https://github.com/borglab/wrap/blob/master/README.md#git-subtree-and-contributing)
|
||||
|
||||
### Filename Case Sensitivity
|
||||
|
||||
Because MATLAB requires each class or function to live in a file named after it, the files `Symbol.m` (for the class) and `symbol.m` (for the function) cannot coexist on an OS with a case-insensitive filesystem.
|
||||
|
||||
Put simply, on Windows or macOS, where the filesystem is case-insensitive, `symbol.m` and `Symbol.m` cannot live in the same folder: writing one after the other simply overwrites the first, and MATLAB's naming rules do not let us choose different filenames.
|
||||
|
||||
For this reason, on Windows and MacOS, we ignore the `Symbol` class and request users to use the `symbol` function exclusively for key creation.
|
||||
|
||||
## Ubuntu
|
||||
|
||||
If you have a newer Ubuntu system (later than 10.04), you must make a small modification to your MATLAB installation, due to MATLAB being distributed with an old version of the C++ standard library. Delete or rename all files starting with `libstdc++` in your MATLAB installation directory, in paths:
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
set(PROJECT_PYTHON_SOURCE_DIR ${PROJECT_SOURCE_DIR}/python)
|
||||
set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python)
|
||||
|
||||
if (NOT GTSAM_BUILD_PYTHON)
|
||||
|
|
@ -6,11 +7,11 @@ endif()
|
|||
|
||||
# Generate setup.py.
|
||||
file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
|
||||
configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in
|
||||
configure_file(${PROJECT_PYTHON_SOURCE_DIR}/setup.py.in
|
||||
${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py)
|
||||
|
||||
# Supply MANIFEST.in for older versions of Python
|
||||
file(COPY ${PROJECT_SOURCE_DIR}/python/MANIFEST.in
|
||||
file(COPY ${PROJECT_PYTHON_SOURCE_DIR}/MANIFEST.in
|
||||
DESTINATION ${GTSAM_PYTHON_BUILD_DIRECTORY})
|
||||
|
||||
set(WRAP_BUILD_TYPE_POSTFIXES ${GTSAM_BUILD_TYPE_POSTFIXES})
|
||||
|
|
@ -99,7 +100,7 @@ pybind_wrap(${GTSAM_PYTHON_TARGET} # target
|
|||
"gtsam" # module_name
|
||||
"gtsam" # top_namespace
|
||||
"${ignore}" # ignore_classes
|
||||
${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl
|
||||
${PROJECT_PYTHON_SOURCE_DIR}/gtsam/gtsam.tpl
|
||||
gtsam # libs
|
||||
"gtsam;gtsam_header" # dependencies
|
||||
${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization
|
||||
|
|
@ -178,7 +179,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
|
|||
"gtsam_unstable" # module_name
|
||||
"gtsam" # top_namespace
|
||||
"${ignore}" # ignore_classes
|
||||
${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl
|
||||
${PROJECT_PYTHON_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.tpl
|
||||
gtsam_unstable # libs
|
||||
"gtsam_unstable;gtsam_unstable_header" # dependencies
|
||||
${GTSAM_ENABLE_BOOST_SERIALIZATION} # use_boost_serialization
|
||||
|
|
|
|||
|
|
@ -11,17 +11,18 @@ import gtsam
|
|||
import numpy as np
|
||||
from typing import List
|
||||
|
||||
def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
|
||||
def error_func(this: gtsam.CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
|
||||
...
|
||||
```
|
||||
|
||||
`this` is a reference to the `CustomFactor` object. This is required because one can reuse the same
|
||||
`error_func` for multiple factors. `v` is a reference to the current set of values, and `H` is a list of
|
||||
**references** to the list of required Jacobians (see the corresponding C++ documentation).
|
||||
**references** to the list of required Jacobians (see the corresponding C++ documentation). Note that
|
||||
the error returned must be a 1D numpy array.
|
||||
|
||||
If `H` is `None`, it means the current factor evaluation does not need Jacobians. For example, the `error`
|
||||
method on a factor does not need Jacobians, so we don't evaluate them to save CPU. If `H` is not `None`,
|
||||
each entry of `H` can be assigned a `numpy` array, as the Jacobian for the corresponding variable.
|
||||
each entry of `H` can be assigned a (2D) `numpy` array, as the Jacobian for the corresponding variable.
|
||||
|
||||
After defining `error_func`, one can create a `CustomFactor` just like any other factor in GTSAM:
|
||||
|
||||
|
|
@ -42,7 +43,7 @@ from typing import List
|
|||
|
||||
expected = Pose2(2, 2, np.pi / 2)
|
||||
|
||||
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):
|
||||
def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]) -> np.ndarray:
|
||||
"""
|
||||
Error function that mimics a BetweenFactor
|
||||
:param this: reference to the current CustomFactor being evaluated
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ For instructions on updating the version of the [wrap library](https://github.co
|
|||
- This wrapper needs `pyparsing(>=2.4.2)`, and `numpy(>=1.11.0)`. These can be installed as follows:
|
||||
|
||||
```bash
|
||||
pip install -r <gtsam_folder>/python/requirements.txt
|
||||
pip install -r <gtsam_folder>/python/dev_requirements.txt
|
||||
```
|
||||
|
||||
## Install
|
||||
|
|
|
|||
|
|
@ -0,0 +1,2 @@
|
|||
-r requirements.txt
|
||||
pyparsing>=2.4.2
|
||||
|
|
@ -15,7 +15,7 @@ from typing import List, Optional
|
|||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
I = np.eye(1)
|
||||
I = np.eye(1)  # 1x1 identity matrix (a single-element 2D array)
|
||||
|
||||
|
||||
def simulate_car() -> List[float]:
|
||||
|
|
@ -30,7 +30,7 @@ def simulate_car() -> List[float]:
|
|||
|
||||
def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
|
||||
values: gtsam.Values,
|
||||
jacobians: Optional[List[np.ndarray]]) -> float:
|
||||
jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
|
||||
"""GPS Factor error function
|
||||
:param measurement: GPS measurement, to be filled with `partial`
|
||||
:param this: gtsam.CustomFactor handle
|
||||
|
|
@ -44,12 +44,12 @@ def error_gps(measurement: np.ndarray, this: gtsam.CustomFactor,
|
|||
if jacobians is not None:
|
||||
jacobians[0] = I
|
||||
|
||||
return error
|
||||
return error  # given the input types above, this is a 1D np.ndarray
|
||||
|
||||
|
||||
def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,
|
||||
values: gtsam.Values,
|
||||
jacobians: Optional[List[np.ndarray]]) -> float:
|
||||
jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
|
||||
"""Odometry Factor error function
|
||||
:param measurement: Odometry measurement, to be filled with `partial`
|
||||
:param this: gtsam.CustomFactor handle
|
||||
|
|

@ -70,7 +70,7 @@ def error_odom(measurement: np.ndarray, this: gtsam.CustomFactor,

def error_lm(measurement: np.ndarray, this: gtsam.CustomFactor,
             values: gtsam.Values,
             jacobians: Optional[List[np.ndarray]]) -> float:
             jacobians: Optional[List[np.ndarray]]) -> np.ndarray:
    """Landmark Factor error function
    :param measurement: Landmark measurement, to be filled with `partial`
    :param this: gtsam.CustomFactor handle
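The "to be filled with `partial`" notes in these docstrings refer to binding the measurement before handing the function to a `CustomFactor`. A minimal sketch of that pattern is below; the key, noise sigma, and measurement value are illustrative, and `error_gps` is assumed to be the function shown in the hunk above.

```python
from functools import partial

import numpy as np
import gtsam

# Assumes error_gps(measurement, this, values, jacobians) exists as above.
gps_noise = gtsam.noiseModel.Isotropic.Sigma(1, 3.0)  # illustrative sigma
graph = gtsam.NonlinearFactorGraph()

k = 0                            # key of the unknown state (illustrative)
measurement = np.array([10.0])   # illustrative GPS reading

# partial() fixes the measurement argument; CustomFactor supplies the
# remaining (this, values, jacobians) arguments at evaluation time.
graph.add(gtsam.CustomFactor(gps_noise, [k], partial(error_gps, measurement)))
```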

@ -4,6 +4,7 @@ Authors: John Lambert
"""

import unittest
from typing import Dict, List, Tuple

import numpy as np
from gtsam.gtsfm import Keypoints

@ -16,6 +17,29 @@ from gtsam import IndexPair, Point2, SfmTrack2d
class TestDsfTrackGenerator(GtsamTestCase):
    """Tests for DsfTrackGenerator."""

    def test_generate_tracks_from_pairwise_matches_nontransitive(
        self,
    ) -> None:
        """Tests DSF for non-transitive matches.

        Test will result in no tracks since nontransitive tracks are naively discarded by DSF.
        """
        keypoints_list = get_dummy_keypoints_list()
        nontransitive_matches_dict = get_nontransitive_matches()  # contains one non-transitive track

        # For each image pair (i1,i2), we provide a (K,2) matrix
        # of corresponding keypoint indices (k1,k2).
        matches_dict = {}
        for (i1,i2), corr_idxs in nontransitive_matches_dict.items():
            matches_dict[IndexPair(i1, i2)] = corr_idxs

        tracks = gtsam.gtsfm.tracksFromPairwiseMatches(
            matches_dict,
            keypoints_list,
            verbose=True,
        )
        self.assertEqual(len(tracks), 0, "Tracks not filtered correctly")

    def test_track_generation(self) -> None:
        """Ensures that DSF generates three tracks from measurements
        in 3 images (H=200,W=400)."""

@ -29,7 +53,7 @@ class TestDsfTrackGenerator(GtsamTestCase):
        keypoints_list.append(kps_i2)

        # For each image pair (i1,i2), we provide a (K,2) matrix
        # of corresponding image indices (k1,k2).
        # of corresponding keypoint indices (k1,k2).
        matches_dict = {}
        matches_dict[IndexPair(0, 1)] = np.array([[0, 0], [1, 1]])
        matches_dict[IndexPair(1, 2)] = np.array([[2, 0], [1, 1]])

@ -93,5 +117,73 @@ class TestSfmTrack2d(GtsamTestCase):
        assert track.numberMeasurements() == 1


def get_dummy_keypoints_list() -> List[Keypoints]:
    """ """
    img1_kp_coords = np.array([[1, 1], [2, 2], [3, 3.]])
    img1_kp_scale = np.array([6.0, 9.0, 8.5])
    img2_kp_coords = np.array(
        [
            [1, 1.],
            [2, 2],
            [3, 3],
            [4, 4],
            [5, 5],
            [6, 6],
            [7, 7],
            [8, 8],
        ]
    )
    img3_kp_coords = np.array(
        [
            [1, 1.],
            [2, 2],
            [3, 3],
            [4, 4],
            [5, 5],
            [6, 6],
            [7, 7],
            [8, 8],
            [9, 9],
            [10, 10],
        ]
    )
    img4_kp_coords = np.array(
        [
            [1, 1.],
            [2, 2],
            [3, 3],
            [4, 4],
            [5, 5],
        ]
    )
    keypoints_list = [
        Keypoints(coordinates=img1_kp_coords),
        Keypoints(coordinates=img2_kp_coords),
        Keypoints(coordinates=img3_kp_coords),
        Keypoints(coordinates=img4_kp_coords),
    ]
    return keypoints_list


def get_nontransitive_matches() -> Dict[Tuple[int, int], np.ndarray]:
    """Set up correspondences for each (i1,i2) pair that violates transitivity.

        (i=0, k=0)             (i=0, k=1)
            |     \\               |
            |      \\              |
        (i=1, k=2)--(i=2,k=3)--(i=3, k=4)

    Transitivity is violated due to the match between frames 0 and 3.
    """
    nontransitive_matches_dict = {
        (0, 1): np.array([[0, 2]]),
        (1, 2): np.array([[2, 3]]),
        (0, 2): np.array([[0, 3]]),
        (0, 3): np.array([[1, 4]]),
        (2, 3): np.array([[3, 4]]),
    }
    return nontransitive_matches_dict


if __name__ == "__main__":
    unittest.main()
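To see why this configuration is "naively discarded", a plain-Python sketch (not the GTSAM implementation) can union the (image, keypoint) nodes implied by `get_nontransitive_matches()` above: all five nodes end up in one connected component that contains two different keypoints of image 0, so the would-be track is inconsistent and dropped.

```python
# Union-find over (image, keypoint) nodes; assumes get_nontransitive_matches()
# from the test file above is importable or defined in scope.
parent = {}

def find(x):
    parent.setdefault(x, x)
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

def union(a, b):
    parent[find(a)] = find(b)

for (i1, i2), corr_idxs in get_nontransitive_matches().items():
    for k1, k2 in corr_idxs:
        union((i1, int(k1)), (i2, int(k2)))

components = {}
for node in list(parent):
    components.setdefault(find(node), []).append(node)

for nodes in components.values():
    images = [i for (i, _) in nodes]
    status = "consistent" if len(images) == len(set(images)) else "inconsistent"
    print(nodes, status)  # the single component is inconsistent (image 0 appears twice)
```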

@ -0,0 +1,43 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

Simple unit test for custom robust noise model.
Author: Fan Jiang
"""
import unittest

import gtsam
import numpy as np
from gtsam.utils.test_case import GtsamTestCase


class TestRobust(GtsamTestCase):

    def test_RobustLossAndWeight(self):
        k = 10.0

        def custom_weight(e):
            abs_e = abs(e)
            return 1.0 if abs_e <= k else k / abs_e

        def custom_loss(e):
            abs_e = abs(e)
            return abs_e * abs_e / 2.0 if abs_e <= k else k * abs_e - k * k / 2.0

        custom_robust = gtsam.noiseModel.Robust.Create(
            gtsam.noiseModel.mEstimator.Custom(custom_weight, custom_loss,
                                               gtsam.noiseModel.mEstimator.Base.ReweightScheme.Scalar,
                                               "huber"),
            gtsam.noiseModel.Isotropic.Sigma(1, 2.0))
        f = gtsam.PriorFactorDouble(0, 1.0, custom_robust)
        v = gtsam.Values()
        v.insert(0, 0.0)

        self.assertAlmostEquals(f.error(v), 0.125)

if __name__ == "__main__":
    unittest.main()
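As a sanity check on the asserted value, here is a hand calculation under the usual convention that a robust factor's error is the loss applied to the noise-whitened residual (an interpretation, not something stated in the diff): the prior residual is 1.0, whitening by sigma = 2.0 gives 0.5, which falls in the quadratic branch of `custom_loss` since |e| <= k = 10.

```python
# Hypothetical hand-check of the 0.125 assertion above.
sigma = 2.0
raw_residual = 1.0 - 0.0          # prior mean 1.0 vs. value 0.0
whitened = raw_residual / sigma   # 0.5, inside the quadratic region |e| <= k
expected = whitened ** 2 / 2.0    # custom_loss in its quadratic branch
print(expected)                   # 0.125
```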

@ -29,7 +29,7 @@ UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)


class TestBackwardsCompatibility(GtsamTestCase):
    """Tests for the backwards compatibility for the Python wrapper."""
    """Tests for backwards compatibility of the Python wrapper."""

    def setUp(self):
        """Setup test fixtures"""

@ -1,2 +1 @@
numpy>=1.11.0
pyparsing>=2.4.2
Some files were not shown because too many files have changed in this diff.